Dataset schema (one record per source file; ranges give the minimum and maximum observed values, and ⌀ marks nullable columns):

| column | type | range / distinct values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 5 to 2.06M |
| ext | stringclasses | 10 distinct values |
| lang | stringclasses | 1 distinct value |
| max_stars_repo_path | stringlengths | 3 to 248 |
| max_stars_repo_name | stringlengths | 5 to 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_issues_repo_path | stringlengths | 3 to 248 |
| max_issues_repo_name | stringlengths | 5 to 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_forks_repo_path | stringlengths | 3 to 248 |
| max_forks_repo_name | stringlengths | 5 to 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24, nullable (⌀) |
| content | stringlengths | 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
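
Each row below is one record in this schema. As a point of reference, here is a minimal sketch of iterating over such a dataset with the Hugging Face `datasets` library; the dataset path used is a made-up placeholder, not the actual name of this dataset.

```python
# Minimal sketch; "owner/python-source-files" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("owner/python-source-files", split="train", streaming=True)
for row in ds.take(3):
    # each record pairs repository metadata with the raw file text in row["content"]
    print(row["hexsha"], row["max_stars_repo_name"], row["size"])
    print(row["content"][:200])
```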

Record 1
path (stars/issues/forks): tests/graphql/readme_forum/readme_forum_permissions/models.py
hexsha: 72395e4c87b9f1d8044b25a00bbab7ea6fe4633a | size: 337 | ext: py | lang: Python
max_stars: repo karlosss/simple_api, head 03f87035c648f161d5e7a59b24f4e04bd34399f1, licenses ["MIT"], count 2, events 2020-11-13T14:00:06.000Z to 2020-12-19T11:50:22.000Z
max_issues: repo ladal1/simple_api, head 1b5d560476bccad9f68a7331d092dbdb68c48bf7, licenses ["MIT"], count 5, events 2021-02-04T14:27:43.000Z to 2021-06-04T23:22:24.000Z
max_forks: repo ladal1/simple_api, head 1b5d560476bccad9f68a7331d092dbdb68c48bf7, licenses ["MIT"], count 1, events 2021-01-06T13:54:38.000Z to 2021-01-06T13:54:38.000Z

content:

from django.contrib.auth.models import User
from django.db.models import Model, CharField, TextField, ForeignKey, CASCADE
class Post(Model):
title = CharField(max_length=50)
author = ForeignKey(User, on_delete=CASCADE)
content = TextField()
def __str__(self):
return "{} by {}".format(self.title, self.author)

avg_line_length: 28.083333 | max_line_length: 77 | alphanum_fraction: 0.712166 | count_classes: 212 | score_classes: 0.62908 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 10 | score_documentation: 0.029674
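
The `Post.__str__` method above joins the title and the author; a small sketch of that behavior with made-up field values (this assumes a configured Django environment, since `django.contrib.auth` models cannot be imported without settings):

```python
# Sketch only: unsaved model instances with made-up values, assuming Django settings
# are already configured; illustrates Post.__str__ from the record above.
from django.contrib.auth.models import User

post = Post(title="Hello GraphQL", author=User(username="alice"))
print(str(post))  # -> "Hello GraphQL by alice"
```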

Record 2
path (stars/issues/forks): streamlit_app.py
hexsha: 723b9095a8d15e2c9c1b3f5d5be4c81a6f6e858e | size: 2,304 | ext: py | lang: Python
max_stars: repo fhebal/nlp-medical-notes, head f1fed9e34ba47da14220b5719f28c1e720302f45, licenses ["MIT"], count null, event datetimes null
max_issues: repo fhebal/nlp-medical-notes, head f1fed9e34ba47da14220b5719f28c1e720302f45, licenses ["MIT"], count null, event datetimes null
max_forks: repo fhebal/nlp-medical-notes, head f1fed9e34ba47da14220b5719f28c1e720302f45, licenses ["MIT"], count null, event datetimes null

content:

import streamlit as st
import yaml
from load_css import local_css
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
import numpy as np
from random import sample
import os
local_css("style.css")
prediction_key = {
0:'Gastroenterology',
1:'Neurology',
2:'Orthopedic',
3:'Radiology',
4:'Urology'
}
class Highlighter():
def __init__(self):
self.start = "<span class='highlight blue'><span class='bold'>"
self.end = "</span></span>"
def highlight_match(self, text, config):
for value in config:
text = text.replace(" "+value+" ", "{0}"+value+"{1}")
text = "<div>" + text.format(self.start, self.end) + "</div>"
return text
# Load model from file
model = tf.keras.models.load_model('/home/muody/saved_model/my_model', compile=False)
# load data
def load_data():
data_path = '/home/muody/data/medicalnotes/dataset/unlabeled-test-data/'
files = os.listdir(data_path)
sample_file = data_path + sample(files, 1)[0]
with open(sample_file, 'r') as stream:
sample_data = stream.read()
sample_data = sample_data.replace('\n','')
sample_data = sample_data.replace('</B>','')
sample_data = sample_data.replace('<B>','')
return sample_data
def main():
# INPUT DATA
#sample = st.text_input('Input your sentence here:')
sample = load_data()
prediction_arr = tf.sigmoid(model.predict(tf.convert_to_tensor([sample]))).numpy()
prediction_num = np.argmax(prediction_arr)
prediction = prediction_key[prediction_num]
prediction_text = "<div>Prediction: <span class='highlight red'><span class='bold'>" + prediction + '</span></span></div>'
st.markdown(prediction_text, unsafe_allow_html=True)
st.write('\n')
for key, value in prediction_key.items():
st.write(value, prediction_arr[0][key])
label = prediction_num
with open("config/{}.yaml".format(label), 'r') as stream:
try:
config = stream.read().splitlines()
except yaml.YAMLError as exc:
print(exc)
highlighter = Highlighter()
t = highlighter.highlight_match(sample, config)
st.markdown(t, unsafe_allow_html=True)
if st.button("New Text Sample"):
main()

avg_line_length: 29.538462 | max_line_length: 126 | alphanum_fraction: 0.647569 | count_classes: 382 | score_classes: 0.165799 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 512 | score_documentation: 0.222222
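
For reference, a short sketch of what `Highlighter.highlight_match` above returns; the note text and keyword list are made-up examples:

```python
# Made-up inputs; reuses the Highlighter class defined in the record above.
h = Highlighter()
print(h.highlight_match("chest pain noted on exam", ["pain"]))
# -> <div>chest <span class='highlight blue'><span class='bold'>pain</span></span> noted on exam</div>
```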

Record 3
path (stars/issues/forks): tests/test_functions.py
hexsha: 723d7e8a6d6158d63e1b5536dbcf3fd946d29dec | size: 2,440 | ext: py | lang: Python
max_stars: repo aerial-defence/pytak, head e20c2dedfee88489bf21ad931970c2cb982d72ed, licenses ["Apache-2.0"], count null, event datetimes null
max_issues: repo aerial-defence/pytak, head e20c2dedfee88489bf21ad931970c2cb982d72ed, licenses ["Apache-2.0"], count null, event datetimes null
max_forks: repo aerial-defence/pytak, head e20c2dedfee88489bf21ad931970c2cb982d72ed, licenses ["Apache-2.0"], count null, event datetimes null

content:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python Team Awareness Kit (PyTAK) Module Tests."""
import asyncio
import urllib
import pytest
import pytak
__author__ = 'Greg Albrecht W2GMD <[email protected]>'
__copyright__ = 'Copyright 2022 Greg Albrecht'
__license__ = 'Apache License, Version 2.0'
def test_parse_cot_url_https_noport():
test_url1: str = 'https://www.example.com/'
cot_url1: urllib.parse.ParseResult = urllib.parse.urlparse(test_url1)
host1, port1 = pytak.parse_cot_url(cot_url1)
assert 'www.example.com' == host1
assert 8087 == port1
def test_parse_cot_url_tls_noport():
test_url1: str = 'tls://www.example.com'
cot_url1: urllib.parse.ParseResult = urllib.parse.urlparse(test_url1)
host1, port1 = pytak.parse_cot_url(cot_url1)
assert 'www.example.com' == host1
assert 8087 == port1
def test_parse_cot_url_udp_port():
test_url1: str = 'udp://www.example.com:9999'
cot_url1: urllib.parse.ParseResult = urllib.parse.urlparse(test_url1)
host1, port1 = pytak.parse_cot_url(cot_url1)
assert 'www.example.com' == host1
assert 9999 == port1
def test_parse_cot_url_udp_broadcast():
test_url1: str = 'udp+broadcast://www.example.com'
cot_url1: urllib.parse.ParseResult = urllib.parse.urlparse(test_url1)
host1, port1 = pytak.parse_cot_url(cot_url1)
assert 'www.example.com' == host1
assert 6969 == port1
def test_split_host():
test_host1 = 'www.example.com'
test_port1 = '9999'
combined_host_port = ':'.join([test_host1, test_port1])
addr, port = pytak.split_host(combined_host_port)
assert 'www.example.com' == addr
assert 9999 == port
def test_split_host_port():
test_host1 = 'www.example.com'
test_port1 = '9999'
addr, port = pytak.split_host(test_host1, test_port1)
assert 'www.example.com' == addr
assert 9999 == port
def test_split_host_only():
test_host1 = 'www.example.com'
addr, port = pytak.split_host(test_host1)
assert 'www.example.com' == addr
assert pytak.DEFAULT_COT_PORT == port
def test_split_host():
test_host1 = 'www.example.com'
test_port1 = '9999'
combined_host_port = ':'.join([test_host1, test_port1])
addr, port = pytak.split_host(combined_host_port)
assert 'www.example.com' == addr
assert 9999 == port
def test_hello_event():
event = pytak.hello_event('taco')
assert b'taco' in event
    assert b't-x-d-d' in event

avg_line_length: 29.047619 | max_line_length: 73 | alphanum_fraction: 0.702049 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 554 | score_documentation: 0.227049
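
The tests above double as usage notes for `pytak.parse_cot_url` and `pytak.split_host`; a condensed sketch mirroring the calls they exercise:

```python
# Mirrors the calls exercised by the tests above (default TLS port 8087 per the assertions).
import urllib.parse
import pytak

cot_url = urllib.parse.urlparse("tls://www.example.com")
host, port = pytak.parse_cot_url(cot_url)               # -> ("www.example.com", 8087)
addr, port = pytak.split_host("www.example.com:9999")   # -> ("www.example.com", 9999)
```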

Record 4
path (stars/issues/forks): tests/test_aoc_day_02.py
hexsha: 723d802339794483e5614abac1a27413e8db4aa8 | size: 1,054 | ext: py | lang: Python
max_stars: repo ladokp/AdventOfCode2021, head 03f8b9f8579ae562d5f2784a131370a32ed19f8b, licenses ["BSD-2-Clause"], count null, event datetimes null
max_issues: repo ladokp/AdventOfCode2021, head 03f8b9f8579ae562d5f2784a131370a32ed19f8b, licenses ["BSD-2-Clause"], count null, event datetimes null
max_forks: repo ladokp/AdventOfCode2021, head 03f8b9f8579ae562d5f2784a131370a32ed19f8b, licenses ["BSD-2-Clause"], count null, event datetimes null

content:

# test_aoc_day_02.py
import pytest
import solution.aoc_day_02 as aoc
@pytest.fixture
def test_solution():
return aoc.AocSolution(test_suffix="_test")
@pytest.fixture
def exercise_solution():
return aoc.AocSolution()
def test_parse_test_solution(test_solution):
"""Test that input is parsed properly"""
assert test_solution.data == [
("forward", 5),
("down", 5),
("forward", 8),
("up", 3),
("down", 8),
("forward", 2),
]
def test_part1_test_solution(test_solution):
"""Test part 1 on example input"""
assert test_solution.part1() == 150
def test_part2_test_solution(test_solution):
"""Test part 2 on example input"""
assert test_solution.part2() == 900
def test_part1_exercise_solution(exercise_solution):
"""Test part 1 on exercise_solution input"""
assert exercise_solution.part1() == 1383564
def test_part2_exercise_solution(exercise_solution):
"""Test part 2 on exercise_solution input"""
assert exercise_solution.part2() == 1488311643

avg_line_length: 22.425532 | max_line_length: 52 | alphanum_fraction: 0.683112 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 153 | score_decorators: 0.145161 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 266 | score_documentation: 0.252372
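
For context, the parsed example above is the Advent of Code 2021 day 2 course, where part 1 multiplies the final horizontal position by the final depth; that is how the expected value of 150 arises. A standalone sketch of that arithmetic, independent of the `AocSolution` class:

```python
# Standalone check of the part-1 arithmetic implied by the example data and the expected 150.
commands = [("forward", 5), ("down", 5), ("forward", 8), ("up", 3), ("down", 8), ("forward", 2)]
horizontal = sum(n for d, n in commands if d == "forward")                      # 15
depth = sum(n if d == "down" else -n for d, n in commands if d != "forward")    # 10
assert horizontal * depth == 150
```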

Record 5
path (stars/issues/forks): src/elora/elora.py
hexsha: 723e3c60c657572c4703c5d71bdcbccb656fe914 | size: 18,265 | ext: py | lang: Python
max_stars: repo morelandjs/elora, head e902c40d66b0bf95a8d2374afa0cc165b87c9b82, licenses ["MIT"], count 1, events 2021-07-26T20:36:32.000Z to 2021-07-26T20:36:32.000Z
max_issues: repo morelandjs/elora, head e902c40d66b0bf95a8d2374afa0cc165b87c9b82, licenses ["MIT"], count null, event datetimes null
max_forks: repo morelandjs/elora, head e902c40d66b0bf95a8d2374afa0cc165b87c9b82, licenses ["MIT"], count null, event datetimes null

content:

from operator import add, sub
import numpy as np
from scipy.stats import norm
class Elora:
def __init__(self, times, labels1, labels2, values, biases=0):
"""
Elo regressor algorithm for paired comparison time series prediction
Author: J. Scott Moreland
Args:
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison outcome values
biases (array of float or scalar, optional): comparison bias
corrections
Attributes:
examples (np.recarray): time-sorted numpy record array of
(time, label1, label2, bias, value, value_pred) samples
first_update_time (np.datetime64): time of the first comparison
last_update_time (np.datetime64): time of the last comparison
labels (array of string): unique compared entity labels
median_value (float): median expected comparison value
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
values = np.array(values, dtype='float', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
self.first_update_time = times.min()
self.last_update_time = times.max()
self.labels = np.union1d(labels1, labels2)
self.median_value = np.median(values)
prior = self.median_value * np.ones_like(values, dtype=float)
self.examples = np.sort(
np.rec.fromarrays([
times,
labels1,
labels2,
biases,
values,
prior,
], names=(
'time',
'label1',
'label2',
'bias',
'value',
'value_pred'
)), order=['time', 'label1', 'label2'], axis=0)
@property
def initial_rating(self):
"""
Customize this function for a given subclass.
It computes the initial rating, equal to the rating one would
expect if all labels were interchangeable.
Default behavior is to return one-half the median outcome value
if the labels commute, otherwise 0.
"""
return .5*self.median_value if self.commutes else 0
def regression_coeff(self, elapsed_time):
"""
Customize this function for a given subclass.
It computes the regression coefficient—prefactor multiplying the
rating of each team evaluated at each update—as a function of
elapsed time since the last rating update for that label.
Default behavior is to return 1, i.e. no rating regression.
"""
return 1.0
def evolve_rating(self, rating, elapsed_time):
"""
        Applies rating regression to a label's rating and returns the evolved
        rating.

        Args:
            rating (float): rating at the time of the last update
            elapsed_time (np.timedelta64): time elapsed since that update

        Returns:
            rating (float): regressed (evolved) rating
"""
regress = self.regression_coeff(elapsed_time)
return regress * rating + (1.0 - regress) * self.initial_rating
def fit(self, k, commutes, scale=1, burnin=0):
"""
        Primary routine that performs model calibration.
Args:
k (float): coefficient that multiplies the prediction error to
determine the rating update.
commutes (bool): false if the observed values change sign under
                label interchange and true otherwise.
"""
self.commutes = commutes
self.scale = scale
self.commutator = 0. if commutes else self.median_value
self.compare = add if commutes else sub
record = {label: [] for label in self.labels}
prior_state_dict = {}
for idx, example in enumerate(self.examples):
time, label1, label2, bias, value, value_pred = example
default = (time, self.initial_rating)
prior_time1, prior_rating1 = prior_state_dict.get(label1, default)
prior_time2, prior_rating2 = prior_state_dict.get(label2, default)
rating1 = self.evolve_rating(prior_rating1, time - prior_time1)
rating2 = self.evolve_rating(prior_rating2, time - prior_time2)
value_pred = self.compare(rating1, rating2) + self.commutator + bias
self.examples[idx]['value_pred'] = value_pred
rating_change = k * (value - value_pred)
rating1 += rating_change
rating2 += rating_change if self.commutes else -rating_change
record[label1].append((time, rating1))
record[label2].append((time, rating2))
prior_state_dict[label1] = (time, rating1)
prior_state_dict[label2] = (time, rating2)
for label in record.keys():
record[label] = np.rec.array(
record[label], dtype=[
('time', 'datetime64[s]'), ('rating', 'float')])
self.record = record
residuals = np.rec.fromarrays([
self.examples.time,
self.examples.value - self.examples.value_pred
], names=('time', 'residual'))
return residuals
def get_rating(self, times, labels):
"""
Query label state(s) at the specified time accounting
for rating regression.
Args:
times (array of np.datetime64): Comparison datetimes
labels (array of string): Comparison entity labels
Returns:
rating (array): ratings for each time and label pair
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels = np.array(labels, dtype='str', ndmin=1)
ratings = np.empty_like(times, dtype='float')
for idx, (time, label) in enumerate(zip(times, labels)):
try:
label_record = self.record[label]
index = label_record.time.searchsorted(time)
prev_index = max(index - 1, 0)
prior_state = label_record[prev_index]
rating = self.evolve_rating(
prior_state.rating, time - prior_state.time)
except KeyError:
rating = self.initial_rating
ratings[idx] = rating
return ratings
def cdf(self, x, times, labels1, labels2, biases=0):
"""
Computes the comulative distribution function (CDF) for each
comparison, i.e. prob(value < x).
Args:
x (array of float): threshold of comparison for each value
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
y (array of float): cumulative distribution function value
for each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return norm.cdf(x, loc=loc, scale=self.scale)
def sf(self, x, times, labels1, labels2, biases=0):
"""
Computes the survival function (SF) for each
comparison, i.e. prob(value > x).
Args:
x (array of float): threshold of comparison for each value
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
y (array of float): survival function value for each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(norm.sf(x, loc=loc, scale=self.scale))
def pdf(self, x, times, labels1, labels2, biases=0):
"""
Computes the probability distribution function (PDF) for each
comparison, i.e. P(x).
Args:
x (array of float): input values
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
y (array of float): probability density at each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(norm.pdf(x, loc=loc, scale=self.scale))
def percentile(self, p, times, labels1, labels2, biases=0):
"""
Computes percentiles p of the probability distribution.
Args:
p (array of float): percentiles to evaluate (in range [0, 100])
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
x (array of float): values of the distribution corresponding to
each percentile
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
p = np.true_divide(p, 100.0)
if np.count_nonzero(p < 0.0) or np.count_nonzero(p > 1.0):
raise ValueError("percentiles must be in the range [0, 100]")
return np.squeeze(norm.ppf(p, loc=loc, scale=self.scale))
def quantile(self, q, times, labels1, labels2, biases=0):
"""
Computes quantiles q of the probability distribution.
Same as percentiles but accepts values [0, 1].
Args:
q (array of float): quantiles to evaluate (in range [0, 1])
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
x (array of float): values of the distribution corresponding to
each quantile
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(
norm.ppf(q, loc=loc[:, np.newaxis], scale=self.scale))
def mean(self, times, labels1, labels2, biases=0):
"""
Computes the mean of the probability distribution.
Args:
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
y (array of float): mean of the probability distribution
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return np.squeeze(loc)
def residuals(self, y_true=None, standardize=False):
"""
Computes residuals of the model predictions for each training example
        Args:
            y_true (array of float, optional): observed outcome values to
                compare against; defaults to the training example values
standardize (bool): if True, the residuals are standardized to unit
variance
Returns:
residuals (array of float): residuals for each example
"""
y_pred = self.mean(
self.examples.time,
self.examples.label1,
self.examples.label2,
self.examples.bias)
if y_true is None:
y_true = self.examples.value
residuals = y_true - y_pred
if standardize is True:
quantiles = [.159, .841]
qlo, qhi = self.quantile(
quantiles,
self.examples.time,
self.examples.label1,
self.examples.label2,
self.examples.bias
).T
residuals /= .5*abs(qhi - qlo)
return residuals
def rank(self, time):
"""
Ranks labels by comparing mean of each label to the average label.
Args:
time (np.datetime64): time at which the ranking should be computed.
Returns:
label rankings (list of tuples): returns a rank sorted list of
(label, rank) pairs, where rank is the comparison value of
the specified summary statistic.
"""
ranked_list = [
(label, self.get_rating(time, label).item())
for label in self.labels]
return sorted(ranked_list, key=lambda v: v[1], reverse=True)
def sample(self, times, labels1, labels2, biases=0, size=1):
"""
Draw random samples from the predicted comparison probability
distribution.
Args:
times (array_like of np.datetime64): list of datetimes.
labels1 (array_like of string): list of first entity labels.
labels2 (array_like of string): list of second entity labels.
biases (array_like of float, optional): single bias number or
list of bias numbers which match the comparison inputs.
Default is 0, in which case no bias is used.
size (int, optional): number of samples to be drawn.
default is 1, in which case a single value is returned.
Returns:
x (array of float): random samples for the comparison outcome
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
if size < 1 or not isinstance(size, int):
raise ValueError("sample size must be a positive integer")
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return norm.rvs(loc=loc, scale=self.scale, size=size)

avg_line_length: 36.750503 | max_line_length: 80 | alphanum_fraction: 0.594854 | count_classes: 18,187 | score_classes: 0.995512 | count_generators: 0 | score_generators: 0 | count_decorators: 417 | score_decorators: 0.022826 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 8,791 | score_documentation: 0.481198
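
A minimal usage sketch of the `Elora` class above; the dates, labels, outcome values, and the chosen `k` and `scale` are made-up illustrations:

```python
# Made-up data; mirrors the constructor and methods defined in the record above.
import numpy as np

times = ["2021-01-01", "2021-01-08", "2021-01-15"]
labels1 = ["team_a", "team_a", "team_b"]
labels2 = ["team_b", "team_c", "team_c"]
values = [3.0, -7.0, 10.0]   # e.g. point differentials, which flip sign under label swap

model = Elora(times, labels1, labels2, values)
residuals = model.fit(k=0.1, commutes=False, scale=7.0)

print(model.rank(np.datetime64("2021-02-01")))              # rating-sorted labels
print(model.mean(["2021-02-01"], ["team_a"], ["team_b"]))   # predicted mean outcome
```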

Record 6
path (stars/issues/forks): atcoder/abc179/a.py
hexsha: 723f049018f7dbaf5f55c465cf88ce5aa8c8ec4d | size: 48 | ext: py | lang: Python
max_stars: repo sugitanishi/competitive-programming, head 51af65fdce514ece12f8afbf142b809d63eefb5d, licenses ["MIT"], count null, event datetimes null
max_issues: repo sugitanishi/competitive-programming, head 51af65fdce514ece12f8afbf142b809d63eefb5d, licenses ["MIT"], count null, event datetimes null
max_forks: repo sugitanishi/competitive-programming, head 51af65fdce514ece12f8afbf142b809d63eefb5d, licenses ["MIT"], count null, event datetimes null

content:

s=input()
print(s+'s' if s[-1]!='s' else s+'es')

avg_line_length: 24 | max_line_length: 38 | alphanum_fraction: 0.520833 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 10 | score_documentation: 0.208333
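
This record is the plural-forming one-liner for AtCoder ABC179 problem A; on illustrative inputs it behaves as follows:

```python
# Same logic as the one-liner above, applied to illustrative inputs.
for s in ["wood", "bus"]:
    print(s + 's' if s[-1] != 's' else s + 'es')
# -> woods
# -> buses
```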

Record 7
path (stars/issues/forks): steady_cell_phenotype/poly.py
hexsha: 723fcadfa719088f86b59d8093c6f9655d115794 | size: 48,147 | ext: py | lang: Python
max_stars: repo knappa/steadycellphenotype, head b033f01ebc1fa062d310296f19f2f11b484cb557, licenses ["MIT"], count 1, events 2021-12-13T22:20:19.000Z to 2021-12-13T22:20:19.000Z
max_issues: repo knappa/steadycellphenotype, head b033f01ebc1fa062d310296f19f2f11b484cb557, licenses ["MIT"], count 5, events 2021-04-07T01:47:19.000Z to 2021-11-17T01:46:19.000Z
max_forks: repo knappa/steadycellphenotype, head b033f01ebc1fa062d310296f19f2f11b484cb557, licenses ["MIT"], count null, event datetimes null

content:

from __future__ import annotations
import operator
from enum import Enum
from itertools import product
from typing import Dict, Union
import numpy as np
class Operation(Enum):
PLUS = 'PLUS'
MINUS = 'MINUS'
TIMES = 'TIMES'
EXP = 'EXP'
MAX = 'MAX'
MIN = 'MIN'
CONT = 'CONT'
NOT = 'NOT'
####################################################################################################
def h(x, fx):
"""helper function as in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24"""
fx = fx % 3
x = x % 3
if fx > x:
return x + 1
elif fx < x:
return x - 1
else:
return x
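# Added worked example: h nudges x one step (mod 3) toward fx without overshooting, e.g.
#   h(0, 2) == 1 (fx > x, step up), h(2, 0) == 1 (fx < x, step down), h(1, 1) == 1 (no change).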
####################################################################################################
# monomial and sparse polynomial classes. These should be faster than the sympy versions due to
# their reduced scope.
####################################################################################################
class Expression(object):
def __add__(self, other):
return BinaryOperation('PLUS', self, other)
__radd__ = __add__
def __sub__(self, other):
return BinaryOperation('MINUS', self, other)
def __mul__(self, other):
return BinaryOperation('TIMES', self, other)
__rmul__ = __mul__
def __neg__(self):
return UnaryRelation('MINUS', self)
def __pow__(self, power, modulo=None):
return BinaryOperation('EXP', self, power)
# def __divmod__(self, other):
# raise NotImplementedError("division, modulus not implemented")
# def __truediv__(self, other):
# raise NotImplementedError("truediv not implemented")
# def __floordiv__(self, other):
# raise NotImplementedError("floordiv not implemented")
def eval(self, variable_dict):
"""
evaluates the expression. variable_dict is expected to be a dict containing str:Expression or
Monomial:Expression pairs. The latter are constrained to be of single-variable type.
:param variable_dict: a dictionary of taking either single-term monomials or string (variable names) to ints
:return: evaluated expression
"""
raise NotImplementedError("eval() unimplemented in " + str(type(self)))
def is_constant(self):
raise NotImplementedError("is_constant() unimplemented in " + str(type(self)))
def as_c_expression(self):
raise NotImplementedError("as_c_expression() unimplemented in " + str(type(self)))
def as_polynomial(self) -> Union[int, Expression]:
raise NotImplementedError("as_polynomial() unimplemented in " + str(type(self)))
# def as_sympy(self):
# """
# converts to sympy expression
#
# Returns
# -------
# sympy expression
# """
# raise NotImplementedError("as_sympy() unimplemented in " + str(type(self)))
def as_numpy_str(self, variables) -> str:
"""
returns numpy-based function of variables, with order corresponding to that
given in the variables parameter
Parameters
----------
variables
Returns
-------
lambda with len(variables) parameters
"""
raise NotImplementedError("as_numpy_str() unimplemented in " + str(type(self)))
def get_variable_set(self):
""" returns a set containing all variable which occur in this expression """
raise NotImplementedError("get_var_set() unimplemented in " + str(type(self)))
def num_variables(self):
""" returns the number of variables which occur in this expression """
return len(self.get_variable_set())
def rename_variables(self, name_dict: Dict[str, str]):
""" rename variables """
raise NotImplementedError("rename_variables() unimplemented in " + str(type(self)))
def continuous_function_version(self, control_variable):
"""
Wrap this equation with the 'continuity controller' i.e. return CONT(control_variable,self)
:param control_variable: variable or string
:return: functional continuous version
"""
if self.is_constant():
return self
if isinstance(control_variable, str):
control_variable = Monomial.as_var(control_variable)
return Function('CONT', [control_variable, self])
####################################################################################################
#
# the following method converts a system of equations into one which is "continuous" in the sense
# that application of the system does not change the per-coordinate values by more than 1. This is
# accomplished by a type of curve fitting. Fortunately, the formula for this
#
# g(x) = sum_{c\in \F_3^n} h(c) prod_{j=0}^n (1-(x_j-c_j)**2)
#
# (as seen in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24) admits a recursive
# formulation. That is, for a polynomial x_k = f_k(x_0,x_1,...,x_l) we can select one of the
# variables, say x_0 and reduce the polynomial each of 3-ways x_0=0, x_0=1, and x_0=2. This
# correspondingly divides the sum into those which have each of the 3 types of terms
# (1-(x_0-c_0)**2) for c_0=0, c_0=1, and c_0=2
#
# fortunately, (1-(x_j-0)**2)+(1-(x_j-1)**2)+(1-(x_j-2)**2) = 1 so if the evaluations of f become
# constant or even simply eliminate a variable, we need no longer consider that variable.
#
# recursion proceeds by eliminating variables in this manner, multiplying by the appropriate fitting
# term (1-(x_j-c_j)**2) (c_j being the evaluated value of x_j) on the way up.
#
# this comment is not really the place for a full proof of this method, but the proof is easily
# obtained from the above.
#
####################################################################################################
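    # Added illustration of the fitting formula above for a single variable x (n = 1):
    #   g(x) = h(0, f(0))*(1 - x**2) + h(1, f(1))*(1 - (x - 1)**2) + h(2, f(2))*(1 - (x - 2)**2)
    # Mod 3, the factor (1 - (x - c)**2) is 1 when x == c and 0 otherwise, so g(x) == h(x, f(x))
    # at every x in {0, 1, 2}; the recursion below builds this sum one variable at a time.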
def continuous_polynomial_version(self, control_variable):
if self.is_constant():
return self
if isinstance(control_variable, str):
control_variable = Monomial.as_var(control_variable)
# as the control variable is special (due to use in the 'h' function),
# we will need to go through the procedure for it separately, first
accumulator = Mod3Poly.zero()
for control_variable_value in range(3):
evaluated_poly = self.eval({control_variable: control_variable_value})
if is_integer(evaluated_poly) or evaluated_poly.is_constant():
computed_value = int(evaluated_poly)
continuous_value = h(control_variable_value, computed_value)
accumulator += continuous_value * (1 - (control_variable - control_variable_value) ** 2)
else:
accumulator += evaluated_poly.continuous_version_helper(control_variable_value) * \
(1 - (control_variable - control_variable_value) ** 2)
return accumulator
def continuous_version_helper(self, control_variable_value):
# find some free variable
free_variable = tuple(self.get_variable_set())[0]
if isinstance(free_variable, str):
free_variable = Monomial.as_var(free_variable)
# iterate over the ways of setting that variable: 0, 1, 2
accumulator = Mod3Poly.zero()
for free_variable_value in range(3):
evaluated_poly = self.eval({free_variable: free_variable_value})
if is_integer(evaluated_poly) or evaluated_poly.is_constant():
computed_value = int(evaluated_poly)
continuous_value = h(control_variable_value, computed_value)
accumulator += \
continuous_value * (1 - (free_variable - free_variable_value) ** 2)
else:
accumulator += evaluated_poly.continuous_version_helper(control_variable_value) * \
(1 - (free_variable - free_variable_value) ** 2)
return accumulator
####################################################################################################
def rename_helper(expression: Union[Expression, int], name_dict: Dict[str, str]):
if is_integer(expression):
return expression
else:
return expression.rename_variables(name_dict=name_dict)
####################################################################################################
# actions on expressions, suitable for conversion to polynomial form. Not best for simulator.
def mod_3(n):
return n % 3
def not3(n):
value = 2 + 2 * n
if is_integer(value) or value.is_constant():
return mod_3(int(value))
else:
return value
def max3(a, b):
value = a + b + 2 * a * b + (a ** 2) * b + a * (b ** 2) + (a ** 2) * (b ** 2)
if is_integer(value) or value.is_constant():
return mod_3(int(value))
else:
return value
def min3(a, b):
value = a * b + 2 * (a ** 2) * b + 2 * a * (b ** 2) + 2 * (a ** 2) * (b ** 2)
if is_integer(value) or value.is_constant():
return mod_3(int(value))
else:
return value
def is_integer(x):
return isinstance(x, int) or isinstance(x, np.integer)
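# Added sanity check of the mod-3 identities defined above in not3, max3, and min3:
#   not3: 2 + 2*n evaluates to 2, 1, 0 for n = 0, 1, 2, i.e. ternary negation.
#   max3(1, 2): 1 + 2 + 2*1*2 + 1*2 + 1*4 + 1*4 = 17, and 17 mod 3 = 2 = max(1, 2).
#   min3(1, 2): 1*2 + 2*1*2 + 2*1*4 + 2*1*4 = 22, and 22 mod 3 = 1 = min(1, 2).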
####################################################################################################
class Function(Expression):
def __init__(self, function_name, expression_list):
self._function_name = function_name
self._expression_list = expression_list
def rename_variables(self, name_dict: Dict[str, str]):
renamed_parameters = [rename_helper(expr, name_dict) for expr in self._expression_list]
return Function(self._function_name, renamed_parameters)
def eval(self, variable_dict):
# evaluate function parameters
evaluated_expressions = [expr if is_integer(expr)
else expr.eval(variable_dict)
for expr in self._expression_list]
# simplify constants to ints, if possible
evaluated_expressions = [int(expr) if is_integer(expr) or expr.is_constant()
else expr
for expr in evaluated_expressions]
if self._function_name == 'MAX':
assert len(evaluated_expressions) == 2, "wrong number of arguments for MAX"
expr_one, expr_two = evaluated_expressions
# if it can be computed directly, do it. otherwise, return in function form
if is_integer(expr_one) and is_integer(expr_two):
expr_one = mod_3(expr_one)
expr_two = mod_3(expr_two)
return max(expr_one, expr_two)
elif is_integer(expr_one) and expr_one == 2:
return 2
elif is_integer(expr_one) and expr_one == 0:
return expr_two
elif is_integer(expr_two) and expr_two == 2:
return 2
elif is_integer(expr_two) and expr_two == 0:
return expr_one
else:
return Function('MAX', [expr_one, expr_two])
elif self._function_name == 'MIN':
assert len(evaluated_expressions) == 2, "wrong number of arguments for MIN"
expr_one, expr_two = evaluated_expressions
# if it can be computed directly, do it. otherwise, return in function form
if is_integer(expr_one) and is_integer(expr_two):
expr_one = mod_3(expr_one)
expr_two = mod_3(expr_two)
return min(expr_one, expr_two)
elif is_integer(expr_one) and expr_one == 2:
return expr_two
elif is_integer(expr_one) and expr_one == 0:
return 0
elif is_integer(expr_two) and expr_two == 2:
return expr_one
elif is_integer(expr_two) and expr_two == 0:
return 0
else:
return Function('MIN', [expr_one, expr_two])
elif self._function_name == 'CONT':
assert len(evaluated_expressions) == 2, "wrong number of arguments for CONT"
ctrl_var, expr = evaluated_expressions
if is_integer(ctrl_var):
raise Exception("Unsupported; nonsense")
return Function('CONT', [ctrl_var, expr])
elif self._function_name == 'NOT':
assert len(evaluated_expressions) == 1, "wrong number of arguments for NOT"
expr = evaluated_expressions[0]
# if it can be computed directly, do it. otherwise, return in function form
if is_integer(expr):
return not3(int(expr))
else:
return Function('NOT', [expr])
else:
raise Exception("cannot evaluate unknown function " + self._function_name)
def is_constant(self):
return all(is_integer(expr) or expr.is_constant()
for expr in self._expression_list)
def __str__(self):
return self._function_name + "(" + ",".join([str(exp) for exp in self._expression_list]) + ")"
__repr__ = __str__
def as_c_expression(self):
c_exprs = [str(expr) if is_integer(expr) else expr.as_c_expression() for expr in self._expression_list]
if self._function_name == 'MAX':
func_name = 'mod3max'
elif self._function_name == 'MIN':
func_name = 'mod3min'
elif self._function_name == 'CONT':
func_name = 'mod3continuity'
elif self._function_name == 'NOT':
func_name = 'mod3not'
else:
raise Exception("Unknown binary relation: " + self._function_name)
return func_name + '(' + ",".join(c_exprs) + ')'
def as_polynomial(self):
expressions_as_polynomials = [mod_3(expr) if is_integer(expr)
else expr.as_polynomial()
for expr in self._expression_list]
if self._function_name == 'MAX':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MAX"
return max3(expressions_as_polynomials[0], expressions_as_polynomials[1])
elif self._function_name == 'MIN':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MIN"
return min3(expressions_as_polynomials[0], expressions_as_polynomials[1])
elif self._function_name == 'CONT':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for CONT"
return expressions_as_polynomials[1].continuous_polynomial_version(expressions_as_polynomials[0])
elif self._function_name == 'NOT':
assert len(expressions_as_polynomials) == 1, "wrong number of arguments for NOT"
return not3(expressions_as_polynomials[0])
else:
raise Exception("cannot evaluate unknown function " + self._function_name + " as a polynomial")
# def as_sympy(self):
#
# def cont_sympy(control, expr):
# return expr if is_integer(expr) \
# else expr.continuous_polynomial_version(control)
#
# def not_sympy(expr):
# return 1 - expr
#
# # tuples are param-count, function
# functions = {'MAX': (2, sympy.Max),
# 'MIN': (2, sympy.Min),
# 'CONT': (2, cont_sympy),
# 'NOT': (1, not_sympy)}
#
# if self._function_name not in functions:
# raise Exception("cannot evaluate unknown function " + self._function_name + " as a sympy expression")
#
# if len(self._expression_list) != functions[self._function_name][0]:
# raise Exception(f"Wrong number of arguments for {self._function_name}")
#
# function = functions[self._function_name][1]
#
# sympy_expressions = [sympy.Mod(expr, 3) if is_integer(expr)
# else sympy.Mod(expr.as_sympy(), 3)
# for expr in self._expression_list]
# return function(*sympy_expressions)
def as_numpy_str(self, variables) -> str:
np_parameter_strings = [str(expr) if is_integer(expr)
else expr.as_numpy_str(variables)
for expr in self._expression_list]
# this one is slow
# continuous_str = "( (({1})>({0})) * (({0})+1) + (({1})<({0})) * (({0})-1) + (({1})==({0}))*({0}) )"
continuous_str = "( {0}+np.sign(np.mod({1},3)-np.mod({0},3)) )"
max_str = "np.maximum(np.mod({0},3),np.mod({1},3))"
min_str = "np.minimum(np.mod({0},3),np.mod({1},3))"
not_str = "(2-({0}))"
# tuples are param-count, function
function_strings = {'MAX': (2, max_str),
'MIN': (2, min_str),
'CONT': (2, continuous_str),
'NOT': (1, not_str)}
if self._function_name not in function_strings:
raise Exception("cannot evaluate unknown function " + self._function_name + " as a numpy function")
if len(self._expression_list) != function_strings[self._function_name][0]:
raise Exception(f"Wrong number of arguments for {self._function_name}")
function = function_strings[self._function_name][1]
return function.format(*np_parameter_strings)
def get_variable_set(self):
var_set = set()
for expr in self._expression_list:
if not is_integer(expr):
var_set = var_set.union(expr.get_variable_set())
return var_set
class BinaryOperation(Expression):
def __init__(self, relation_name, left_expression: Union[Expression, int],
right_expression: Union[Expression, int]):
self.relation_name = relation_name
self._left_expression: Union[Expression, int] = left_expression
self._right_expression: Union[Expression, int] = right_expression
def rename_variables(self, name_dict: Dict[str, str]):
renamed_left_expression = rename_helper(self._left_expression, name_dict)
renamed_right_expression = rename_helper(self._right_expression, name_dict)
return BinaryOperation(self.relation_name,
left_expression=renamed_left_expression,
right_expression=renamed_right_expression)
def is_constant(self):
return (is_integer(self._left_expression) or self._left_expression.is_constant()) and \
(is_integer(self._right_expression) or self._right_expression.is_constant())
def eval(self, variable_dict):
"""
evaluate parameters, making them ints if possible
:param variable_dict: a dictionary of taking either single-term monomials or string (variable names) to ints
:return: evaluated expression
"""
evaled_left_expr = self._left_expression if is_integer(self._left_expression) \
else self._left_expression.eval(variable_dict)
evaled_left_expr = int(evaled_left_expr) \
if is_integer(evaled_left_expr) or evaled_left_expr.is_constant() \
else evaled_left_expr
evaled_right_expr = self._right_expression if is_integer(self._right_expression) \
else self._right_expression.eval(variable_dict)
evaled_right_expr = int(evaled_right_expr) \
if is_integer(evaled_right_expr) or evaled_right_expr.is_constant() \
else evaled_right_expr
if self.relation_name == 'PLUS':
return evaled_left_expr + evaled_right_expr
elif self.relation_name == 'MINUS':
return evaled_left_expr - evaled_right_expr
elif self.relation_name == 'TIMES':
return evaled_left_expr * evaled_right_expr
elif self.relation_name == 'EXP':
return evaled_left_expr ** evaled_right_expr
else:
raise Exception("cannot evaluate unknown binary op: " + self.relation_name)
def __str__(self):
short_relation_name = "?"
if self.relation_name == 'PLUS':
short_relation_name = '+'
elif self.relation_name == 'MINUS':
short_relation_name = '-'
elif self.relation_name == 'TIMES':
short_relation_name = '*'
elif self.relation_name == 'EXP':
short_relation_name = '^'
left_side = str(self._left_expression)
if isinstance(self._left_expression, BinaryOperation):
left_side = "(" + left_side + ")"
right_side = str(self._right_expression)
if isinstance(self._right_expression, BinaryOperation):
right_side = "(" + right_side + ")"
return left_side + short_relation_name + right_side
__repr__ = __str__
def as_c_expression(self):
if is_integer(self._left_expression):
left_c_expr = str(self._left_expression)
else:
left_c_expr = self._left_expression.as_c_expression()
if is_integer(self._right_expression):
right_c_expr = str(self._right_expression)
else:
right_c_expr = self._right_expression.as_c_expression()
if self.relation_name == 'PLUS':
return '(' + left_c_expr + ')+(' + right_c_expr + ')'
elif self.relation_name == 'MINUS':
return '(' + left_c_expr + ')-(' + right_c_expr + ')'
elif self.relation_name == 'TIMES':
return '(' + left_c_expr + ')*(' + right_c_expr + ')'
elif self.relation_name == 'EXP':
return 'mod3pow(' + left_c_expr + ',' + right_c_expr + ')'
else:
raise Exception("Unknown binary relation: " + self.relation_name)
def as_polynomial(self):
if is_integer(self._left_expression):
left_poly = self._left_expression
else:
left_poly = self._left_expression.as_polynomial()
if is_integer(self._right_expression):
right_poly = self._right_expression
else:
right_poly = self._right_expression.as_polynomial()
if self.relation_name == 'PLUS':
return left_poly + right_poly
elif self.relation_name == 'MINUS':
return left_poly - right_poly
elif self.relation_name == 'TIMES':
return left_poly * right_poly
elif self.relation_name == 'EXP':
# simplify the exponent = 0, 1 cases
if is_integer(right_poly):
if right_poly == 0:
return 1
elif right_poly == 1:
return left_poly
else:
return left_poly ** right_poly
else:
return left_poly ** right_poly
else:
raise Exception("Unknown binary relation: " + self.relation_name)
# def as_sympy(self):
# """
# Convert to sympy expression
# Returns
# -------
# sympy expression
# """
#
# def simple_pow(left_exp, right_exp):
# # simplify the exponent = 0, 1 cases
# if is_integer(right_exp):
# if right_exp == 0:
# return 1
# elif right_exp == 1:
# return left_exp
# else:
# return left_exp ** right_exp
# else:
# return left_exp ** right_exp
#
# relations = {'PLUS': operator.add,
# 'MINUS': operator.sub,
# 'TIMES': operator.mul,
# 'EXP': simple_pow}
#
# if self.relation_name not in relations:
# raise Exception("Unknown binary relation: " + self.relation_name)
#
# lhs = self._left_expression if is_integer(self._left_expression) else self._left_expression.as_sympy()
# rhs = self._right_expression if is_integer(self._right_expression) else self._right_expression.as_sympy()
#
# return relations[self.relation_name](lhs, rhs)
def as_numpy_str(self, variables) -> str:
"""
Convert to numpy function
Parameters
----------
variables
Returns
-------
str version of numpy function
"""
relations = {'PLUS': "(({0})+({1}))",
'MINUS': "(({0})-({1}))",
'TIMES': "(({0})*({1}))",
'EXP': "(({0})**({1}))"}
if self.relation_name not in relations:
raise Exception("Unknown binary relation: " + self.relation_name)
lhs = str(self._left_expression) if is_integer(self._left_expression) \
else self._left_expression.as_numpy_str(variables)
rhs = str(self._right_expression) if is_integer(self._right_expression) \
else self._right_expression.as_numpy_str(variables)
return relations[self.relation_name].format(lhs, rhs)
def get_variable_set(self):
var_set = set()
if not is_integer(self._left_expression):
var_set = var_set.union(self._left_expression.get_variable_set())
if not is_integer(self._right_expression):
var_set = var_set.union(self._right_expression.get_variable_set())
return var_set
class UnaryRelation(Expression):
def __init__(self, relation_name, expr):
self._relation_name = relation_name
self._expr = expr
def rename_variables(self, name_dict: Dict[str, str]):
return UnaryRelation(relation_name=self._relation_name,
expr=rename_helper(self._expr, name_dict))
def is_constant(self):
return self._expr.is_constant()
def eval(self, variable_dict):
if self._relation_name == 'MINUS':
if is_integer(self._expr):
return (-1) * self._expr
            elif isinstance(self._expr, Expression):
evaluated_subexpression = self._expr.eval(variable_dict)
if is_integer(evaluated_subexpression) or evaluated_subexpression.is_constant():
return (-1) * int(evaluated_subexpression)
else:
return (-1) * evaluated_subexpression
else:
raise Exception("UnaryRelation in bad state with unknown unary relation name")
def __str__(self) -> str:
short_rel_name = str(self._relation_name)
if self._relation_name == 'MINUS':
short_rel_name = '-'
return short_rel_name + (
"(" + str(self._expr) + ")" if type(self._expr) == BinaryOperation else str(self._expr))
__repr__ = __str__
def as_c_expression(self):
if is_integer(self._expr):
c_exp = str(mod_3(self._expr))
else:
c_exp = self._expr.as_c_expression()
if self._relation_name == 'MINUS':
return '-(' + c_exp + ')'
else:
raise Exception("Unknown binary relation: " + self._relation_name)
def as_polynomial(self):
if is_integer(self._expr) or self._expr.is_constant():
poly = mod_3(int(self._expr))
else:
poly = self._expr.as_polynomial()
if self._relation_name == 'MINUS':
return (-1) * poly
else:
raise Exception("Unknown unary relation: " + self._relation_name)
def as_sympy(self):
"""
Convert to sympy expression
Returns
-------
sympy expression
"""
relations = {'MINUS': operator.neg}
if self._relation_name not in relations:
raise Exception("Unknown unary relation: " + self._relation_name)
expr = self._expr if is_integer(self._expr) else self._expr.as_sympy()
return relations[self._relation_name](expr)
def as_numpy_str(self, variables):
"""
Convert to numpy function
Parameters
----------
variables
Returns
-------
str numpy-representation
"""
relations = {'MINUS': "(-({0}))"}
if self._relation_name not in relations:
raise Exception("Unknown unary relation: " + self._relation_name)
expr_str = str(self._expr) if is_integer(self._expr) \
else self._expr.as_numpy_str(variables)
return relations[self._relation_name].format(expr_str)
def get_variable_set(self):
if is_integer(self._expr):
return set()
else:
return self._expr.get_variable_set()
####################################################################################################
class Monomial(Expression):
"""A class to encapsulate monomials reduced by x^3-x==0 for all variables x"""
def __init__(self, power_dict: dict):
# copy over only those terms which actually appear
self._power_dict = {str(var): power_dict[var] for var in power_dict if power_dict[var] != 0}
for var in self._power_dict.keys():
# while self._power_dict[var] < 0:
# self._power_dict[var] += 2 <--- replace with below
assert self._power_dict[var] > 0 # b/c x^-1 isn't exactly x (i.e. when x=0)
# while self._power_dict[var] >= 3:
# self._power_dict[var] -= 2 <--- replace with below
self._power_dict[var] = 1 + ((-1 + self._power_dict[var]) % 2)
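    # Added note: exponents are reduced using x**3 == x over F_3, so any exponent e >= 1
    # collapses to 1 + ((e - 1) % 2); for example x**4 reduces to x**2 and x**5 reduces to x.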
def rename_variables(self, name_dict: Dict[str, str]):
# this ends up a little more complicated than I was originally thinking, b/c
# I would like to allow two variables to be updated to the same new name
renamed_dict = dict()
for variable, exponent in self._power_dict.items():
name = variable
if variable in name_dict:
name = name_dict[variable]
if name in renamed_dict:
renamed_dict[name] += self._power_dict[variable]
renamed_dict[name] = 1 + ((-1 + renamed_dict[name]) % 2)
else:
renamed_dict[name] = self._power_dict[variable]
return Monomial(power_dict=renamed_dict)
def as_polynomial(self):
return self
def is_constant(self):
return len(self._power_dict) == 0
def num_variables(self):
return len(self._power_dict)
def variable_list(self):
return self._power_dict.keys()
def eval(self, variable_dict: Dict):
"""evaluates the monomial. variable_dict is expected to be a dict containing str:Expression or
Monomial:Expression pairs. The latter are constrained to be of single-variable type.
"""
if type(variable_dict) != dict:
raise Exception("eval is not defined on this input")
# sanitize inputs
sanitized_variable_dict = dict()
for variable, quantity in variable_dict.items():
if type(variable) == str:
sanitized_variable_dict.update({variable: variable_dict[variable]})
elif type(variable) == Monomial:
if variable.num_variables() != 1:
raise Exception(
"We do not know how to evaluate monomials of zero or several variables to a single number")
else:
variable_as_str = list(variable.variable_list())[0]
sanitized_variable_dict.update({variable_as_str: variable_dict[variable]})
variable_dict = sanitized_variable_dict
accumulator = Mod3Poly.one()
for variable, quantity in self._power_dict.items():
if variable in variable_dict.keys():
accumulator *= variable_dict[variable] ** self._power_dict[variable]
else:
accumulator *= Monomial.as_var(variable) ** self._power_dict[variable]
return accumulator
def get_variable_set(self):
""" returns a set containing all variable which occur in this monomial """
return {var for var in self._power_dict if self._power_dict[var] != 0}
@staticmethod
def unit():
"""produces the unit, 1, as a monomial"""
return Monomial(dict())
@staticmethod
def as_var(var_name: str):
return Monomial({var_name: 1})
def __mul__(self, other) -> Expression:
if isinstance(other, Monomial):
result_power_dict = self._power_dict.copy()
for key in other._power_dict.keys():
if key in result_power_dict.keys():
result_power_dict[key] += other._power_dict[key]
while result_power_dict[key] >= 3:
result_power_dict[key] -= 2
else:
result_power_dict[key] = other._power_dict[key]
return Monomial(result_power_dict)
elif isinstance(other, Mod3Poly) or is_integer(other):
return self.as_poly() * other
else:
return BinaryOperation('TIMES', self, other)
# raise TypeError("unsupported operand type(s) for *: '{}' and '{}'".format(self.__class__, type(other)))
__rmul__ = __mul__
def __neg__(self):
return (-1) * self
def __pow__(self, power, **kwargs):
if type(power) == Mod3Poly and power.is_constant():
power = power[Monomial.unit()]
assert is_integer(power)
if power == 0:
return Monomial.unit()
elif power == 1:
return self
elif power == 2:
return self * self
# Now handle higher powers; probably not going to happen too much for this application
# (int) half power root
int_root = self ** (power // 2)
if power % 2 == 0:
return int_root * int_root
else:
return int_root * int_root * self
def as_poly(self):
"""converts this monomial to a polynomial with only one term"""
return Mod3Poly({self: 1})
def __add__(self, other):
if isinstance(other, Mod3Poly):
return other + self.as_poly()
elif isinstance(other, Monomial):
return self.as_poly() + other.as_poly()
elif is_integer(other):
return self.as_poly() + other
elif isinstance(other, Expression):
return BinaryOperation("PLUS", self, other)
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self + ((-1) * other)
def __rsub__(self, other):
return ((-1) * self) + other
def __eq__(self, other):
if type(other) == str:
other = Monomial.as_var(other)
if type(other) == Monomial:
return self._power_dict == other._power_dict
elif type(other) == Mod3Poly:
if len(other.coeff_dict) == 1:
monomial, coeff = list(other.coeff_dict)[0]
return coeff == 1 and monomial == self
else:
return False
elif is_integer(other) and self == Monomial.unit():
return other == 1
else:
return False
def __ne__(self, other):
if type(other) == str:
other = Monomial.as_var(other)
return not (self == other)
def __lt__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if we have a var that they don't we cannot be "smaller"
if len(self_vars - other_vars) > 0:
return False
# check that we do not exceed and are smaller at least once
at_least_once_less = False
for var in self_vars:
if self._power_dict[var] > other._power_dict[var]:
return False
elif self._power_dict[var] < other._power_dict[var]:
at_least_once_less = True
return at_least_once_less or len(other_vars - self_vars) > 0
def __le__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if we have a var that they don't we cannot be "smaller"
if len(self_vars - other_vars) > 0:
return False
# check that we do not exceed
for var in self_vars:
if self._power_dict[var] > other._power_dict[var]:
return False
return True
def __gt__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if they have a var that they don't we cannot be "greater"
if len(other_vars - self_vars) > 0:
return False
# check that we are not smaller and are greater at least once
at_least_once_greater = False
for var in other_vars:
if self._power_dict[var] < other._power_dict[var]:
return False
elif self._power_dict[var] > other._power_dict[var]:
at_least_once_greater = True
return at_least_once_greater or len(self_vars - other_vars) > 0
def __ge__(self, other):
self_vars = set(self._power_dict.keys())
if type(other) == str:
other = Monomial.as_var(other)
other_vars = set(other._power_dict.keys())
# if they have a var that they don't we cannot be "greater"
if len(other_vars - self_vars) > 0:
return False
# check that we are not smaller
for var in other_vars:
if self._power_dict[var] < other._power_dict[var]:
return False
return True
def __hash__(self):
return sum(hash(k) for k in self._power_dict.keys()) + \
sum(hash(v) for v in self._power_dict.values())
def __str__(self):
if self._power_dict == {}:
return "1"
else:
variables = sorted(self._power_dict.keys())
return "*".join([str(var) + "^" + str(self._power_dict[var])
if self._power_dict[var] > 1 else str(var) for var in variables])
__repr__ = __str__
def as_c_expression(self):
if self._power_dict == {}:
return "1"
else:
variables = sorted(self._power_dict.keys())
return "*".join(["mod3pow(" + str(var) + "," + str(self._power_dict[var]) + ")"
if self._power_dict[var] > 1 else str(var) for var in variables
if self._power_dict[var] != 0])
# def as_sympy(self):
# # sympy empty product is 1, consistent with power_dict
# return sympy.prod([sympy.Symbol(var, integer=True) ** pow
# for var, pow in self._power_dict.items()])
# # Fun fact: sympy doesn't recognize Symbol(var) and Symbol(var, integer=True) to be the same
def as_numpy_str(self, variables) -> str:
if len(self._power_dict) == 0:
return "1"
return '(' + \
'*'.join(["1".format(variables.index(var), self._power_dict[var])
if self._power_dict[var] == 0 else
"state[{0}]".format(variables.index(var))
if self._power_dict[var] == 1 else
"(state[{0}]**{1})".format(variables.index(var), self._power_dict[var])
for var in self._power_dict]) + \
')'
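    # Worked example (illustrative note): with variables=['x', 'y'], the monomial x^2*y is
    # rendered by as_numpy_str as "((state[0]**2)*state[1])", i.e. an expression that can be
    # evaluated against a numpy vector bound to the name `state`.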
####################################################################################################
class Mod3Poly(Expression):
"""a sparse polynomial class"""
def __init__(self, coeffs: Union[Dict, int]):
if type(coeffs) == dict:
self.coeff_dict = {monomial: coeffs[monomial] for monomial in coeffs if coeffs[monomial] != 0}
elif is_integer(coeffs):
self.coeff_dict = {Monomial.unit(): (coeffs % 3)}
else:
raise TypeError("unsupported initialization type for '{}': '{}'".format(self.__class__, type(coeffs)))
def rename_variables(self, name_dict: Dict[str, str]):
return Mod3Poly(coeffs={monomial.rename_variables(name_dict): coeff
for monomial, coeff in self.coeff_dict.items()})
@staticmethod
def zero():
return Mod3Poly({Monomial.unit(): 0})
@staticmethod
def one():
return Mod3Poly({Monomial.unit(): 1})
def as_polynomial(self):
return self
def __int__(self):
self.__clear_zero_monomials()
if len(self.coeff_dict) > 1 or (len(self.coeff_dict) == 1 and Monomial.unit() not in self.coeff_dict):
raise Exception("cannot cast non-constant polynomial to int")
if Monomial.unit() in self.coeff_dict:
return self.coeff_dict[Monomial.unit()]
else:
return 0
def eval(self, variable_dict):
"""evaluates the polynomial. variable_dict is expected to be a dict containing str:Expression or
Monomial:Expression pairs. The latter are constrained to be of single-variable type. """
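        # Illustrative call: p.eval({'x': Mod3Poly.one() * 2}) substitutes the constant 2 for
        # every occurrence of the variable 'x' in p; keys may also be single-variable Monomials.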
if type(variable_dict) != dict:
raise Exception("Mod3Poly.eval is not defined on this input")
accumulator = Mod3Poly.zero()
for monomial, coeff in self.coeff_dict.items():
accumulator += coeff * monomial.eval(variable_dict)
return accumulator
def get_variable_set(self):
"""return a set containing all variables which occur in this polynomial"""
var_set = set()
for monomial in self.coeff_dict:
var_set = var_set.union(monomial.get_variable_set())
return var_set
def __clear_zero_monomials(self):
"""purge unneeded data"""
self.coeff_dict = {monomial: self.coeff_dict[monomial]
for monomial in self.coeff_dict
if self.coeff_dict[monomial] != 0}
# assure at least one entry
if len(self.coeff_dict) == 0:
self.coeff_dict = {Monomial.unit(): 0}
def is_constant(self):
# possibly unnecessary
self.__clear_zero_monomials()
num_nonzero_monomial = len(self.coeff_dict)
if num_nonzero_monomial > 1:
return False
elif num_nonzero_monomial == 0:
return True
else:
# only one entry
return Monomial.unit() in self.coeff_dict
def __getitem__(self, index):
if index in self.coeff_dict:
return self.coeff_dict[index]
else:
return 0
def __setitem__(self, index, value):
self.coeff_dict[index] = value
def __add__(self, other):
if is_integer(other):
self_copy = Mod3Poly(self.coeff_dict)
self_copy[Monomial.unit()] = (self_copy[Monomial.unit()] + other) % 3
return self_copy
elif isinstance(other, Monomial):
self_copy = Mod3Poly(self.coeff_dict)
            self_copy[other] = (self_copy[other] + 1) % 3  # keep the coefficient reduced mod 3
return self_copy
elif isinstance(other, Mod3Poly):
self_copy = Mod3Poly(self.coeff_dict)
for key in other.coeff_dict.keys():
if key in self_copy.coeff_dict.keys():
self_copy[key] = (self_copy[key] + other[key]) % 3
else:
self_copy[key] = other[key]
return self_copy
elif isinstance(other, Expression):
return BinaryOperation('PLUS', self, other)
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))
__radd__ = __add__
def __sub__(self, other):
if is_integer(other):
self_copy = Mod3Poly(self.coeff_dict)
self_copy[Monomial.unit()] = (self_copy[Monomial.unit()] - other) % 3
return self_copy
elif isinstance(other, Mod3Poly) or isinstance(other, Monomial):
self_copy = Mod3Poly(self.coeff_dict)
if isinstance(other, Monomial):
other = other.as_poly()
for key in other.coeff_dict.keys():
if key in self_copy.coeff_dict.keys():
self_copy[key] = (self_copy[key] - other[key]) % 3
else:
                    self_copy[key] = (-other[key]) % 3  # key absent from self: negate other's coefficient
return self_copy
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))
def __rsub__(self, other):
return other + ((-1) * self)
def __mul__(self, other):
if is_integer(other):
return Mod3Poly({key: (self.coeff_dict[key] * other) % 3 for key in self.coeff_dict})
elif isinstance(other, Monomial):
return Mod3Poly({(other * monomial): self.coeff_dict[monomial] for monomial in self.coeff_dict})
elif isinstance(other, Mod3Poly):
accumulator = Mod3Poly.zero()
for self_mono, other_mono in product(self.coeff_dict.keys(), other.coeff_dict.keys()):
monomial_prod = self_mono * other_mono
accumulator[monomial_prod] = (accumulator[monomial_prod] + self[self_mono] * other[other_mono]) % 3
return accumulator
else:
return BinaryOperation('TIMES', self, other)
__rmul__ = __mul__
def __pow__(self, power, **kwargs):
if type(power) == Mod3Poly and power.is_constant():
power = power[Monomial.unit()]
assert is_integer(power)
if power == 0:
return Monomial.unit().as_poly()
elif power == 1:
return self
elif power == 2:
return self * self
# Now handle higher powers; probably not going to happen too much for this application
# (int) half power root
int_root = self ** (power // 2)
if power % 2 == 0:
return int_root * int_root
else:
return int_root * int_root * self
def __str__(self):
accumulator = ""
for monomial in sorted(self.coeff_dict.keys()):
if monomial == Monomial.unit():
if self[monomial] != 0:
accumulator += str(self[monomial])
else:
if len(accumulator) > 0 and self[monomial] != 0:
accumulator += "+"
if self[monomial] == 1:
accumulator += str(monomial)
elif self[monomial] == 2:
accumulator += "2*"
accumulator += str(monomial)
if len(accumulator) > 0:
return accumulator
else:
return "0"
__repr__ = __str__
def as_c_expression(self):
accumulator = ""
for monomial in sorted(self.coeff_dict.keys()):
if monomial == Monomial.unit():
if self[monomial] != 0:
accumulator += str(self[monomial])
else:
if len(accumulator) > 0 and self[monomial] != 0:
accumulator += "+"
if self[monomial] == 1:
accumulator += monomial.as_c_expression()
elif self[monomial] == 2:
accumulator += "2*"
accumulator += monomial.as_c_expression()
if len(accumulator) > 0:
return accumulator
else:
return "0"
# def as_sympy(self):
# return sum([coeff * expr.as_sympy() for expr, coeff in self.coeff_dict.items()])
def as_numpy_str(self, variables) -> str:
return '(' + \
"+".join(["({0}*({1}))".format(coeff, expr.as_numpy_str(variables))
for expr, coeff in self.coeff_dict.items()]) + \
')'
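# Minimal usage sketch (added for illustration, not part of the original module). It assumes
# Monomial.as_var, defined earlier in this file, returns the single-variable monomial for a name.
if __name__ == "__main__":
    x = Monomial.as_var("x")
    y = Monomial.as_var("y")
    p = Mod3Poly.one() + x + x   # coefficients live in Z/3Z, so this is 1 + 2*x
    q = p * y + 2                # Mod3Poly mixes with ints and Monomials via __add__/__mul__
    print(p)                     # -> "1+2*x"
    print(q ** 2)                # squaring goes through Mod3Poly.__pow__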
| 38.985425 | 117 | 0.577897 | 45,795 | 0.95115 | 0 | 0 | 343 | 0.007124 | 0 | 0 | 12,263 | 0.254699 |
72404631e2e0ae2fb28f9c18c6b107f7f88a83f4 | 23,165 | py | Python | django_tidb/features.py | killuminatixhr/django-tidb | 8de093dd7242fc70a5b9b5240711bef00722ff03 | [
"Apache-2.0"
]
| 17 | 2021-07-30T17:02:53.000Z | 2021-12-10T02:28:59.000Z | django_tidb/features.py | killuminatixhr/django-tidb | 8de093dd7242fc70a5b9b5240711bef00722ff03 | [
"Apache-2.0"
]
| 7 | 2021-08-02T09:56:27.000Z | 2022-03-23T03:36:07.000Z | django_tidb/features.py | killuminatixhr/django-tidb | 8de093dd7242fc70a5b9b5240711bef00722ff03 | [
"Apache-2.0"
]
| 6 | 2021-07-30T10:04:15.000Z | 2022-03-29T05:44:37.000Z | # Copyright 2021 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from django.db.backends.mysql.features import (
DatabaseFeatures as MysqlDatabaseFeatures,
)
from django.utils.functional import cached_property
class DatabaseFeatures(MysqlDatabaseFeatures):
has_select_for_update = True
supports_transactions = False
uses_savepoints = False
can_release_savepoints = False
atomic_transactions = False
supports_atomic_references_rename = False
can_clone_databases = False
can_rollback_ddl = False
order_by_nulls_first = True
supports_foreign_keys = False
indexes_foreign_keys = False
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
test_collations = {
'ci': 'utf8mb4_general_ci',
'non_default': 'utf8mb4_bin',
}
@cached_property
def django_test_skips(self):
skips = {
"This doesn't work on MySQL.": {
'db_functions.comparison.test_greatest.GreatestTests.test_coalesce_workaround',
'db_functions.comparison.test_least.LeastTests.test_coalesce_workaround',
},
'Running on MySQL requires utf8mb4 encoding (#18392).': {
'model_fields.test_textfield.TextFieldTests.test_emoji',
'model_fields.test_charfield.TestCharField.test_emoji',
},
"MySQL doesn't support functional indexes on a function that "
"returns JSON": {
'schema.tests.SchemaTests.test_func_index_json_key_transform',
},
"MySQL supports multiplying and dividing DurationFields by a "
"scalar value but it's not implemented (#25287).": {
'expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide',
},
"tidb": {
# "Expression #5 of SELECT list is not in GROUP BY clause and contains nonaggregated column
# 'test_django_tests.aggregation_regress_alfa.id' which is not functionally dependent on columns in
# GROUP BY clause; this is incompatible with sql_mode=only_full_group_by"
'aggregation.tests.AggregateTestCase.test_annotate_defer_select_related',
'aggregation_regress.tests.AggregationTests.test_aggregate_duplicate_columns_select_related',
'aggregation_regress.tests.AggregationTests.test_boolean_conversion',
'aggregation_regress.tests.AggregationTests.test_more_more',
'aggregation_regress.tests.JoinPromotionTests.test_ticket_21150',
'annotations.tests.NonAggregateAnnotationTestCase.test_annotation_aggregate_with_m2o',
'defer_regress.tests.DeferAnnotateSelectRelatedTest.test_defer_annotate_select_related',
'queries.test_explain.ExplainTests',
'queries.test_qs_combinators.QuerySetSetOperationTests.'
'test_union_with_values_list_and_order_on_annotation',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_union_with_values_list_and_order',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_ordering_subqueries',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_ordering_by_f_expression_and_alias',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_ordering_by_f_expression',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_ordering_by_alias',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_ordering',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_order_by_same_type',
'queries.test_qs_combinators.QuerySetSetOperationTests.test_combining_multiple_models',
# is unrelation with tidb
'file_uploads.tests.DirectoryCreationTests.test_readonly_root',
'cache.tests.CacheMiddlewareTest.test_cache_page_timeout',
# RuntimeError: A durable atomic block cannot be nested within another atomic block.
'transactions.tests.DisableDurabiltityCheckTests.test_nested_both_durable',
'transactions.tests.DisableDurabiltityCheckTests.test_nested_inner_durable',
# wrong test result
'.test_no_duplicates_for_non_unique_related_object_in_search_fields',
'transaction_hooks.tests.TestConnectionOnCommit.test_inner_savepoint_does_not_affect_outer',
'filtered_relation.tests.FilteredRelationTests.test_select_for_update',
'filtered_relation.tests.FilteredRelationTests.test_union',
'fixtures_regress.tests.TestFixtures.test_loaddata_raises_error_when_fixture_has_invalid_foreign_key',
'introspection.tests.IntrospectionTests.test_get_table_description_nullable',
# django.db.transaction.TransactionManagementError: An error occurred in the current transaction. You
# can't execute queries until the end of the 'atomic' block.
'transaction_hooks.tests.TestConnectionOnCommit.test_inner_savepoint_rolled_back_with_outer',
'transaction_hooks.tests.TestConnectionOnCommit.test_discards_hooks_from_rolled_back_savepoint',
'transaction_hooks.tests.TestConnectionOnCommit.test_inner_savepoint_rolled_back_with_outer',
# AssertionError: True is not false
'sites_tests.tests.CreateDefaultSiteTests.test_multi_db_with_router',
# AssertionError: {} != {'example2.com': <Site: example2.com>}
'sites_tests.tests.SitesFrameworkTests.test_clear_site_cache_domain',
# AttributeError: 'NoneType' object has no attribute 'ping'
'servers.test_liveserverthread.LiveServerThreadTest.test_closes_connections',
'test_utils.tests.TestBadSetUpTestData.test_failure_in_setUpTestData_should_rollback_transaction',
'test_utils.test_testcase.TestDataTests.test_undeepcopyable_warning',
'test_utils.test_testcase.TestDataTests.test_class_attribute_identity',
'test_utils.tests.CaptureOnCommitCallbacksTests.test_execute',
'test_utils.tests.CaptureOnCommitCallbacksTests.test_no_arguments',
'test_utils.tests.CaptureOnCommitCallbacksTests.test_pre_callback',
'test_utils.tests.CaptureOnCommitCallbacksTests.test_using',
# [planner:3065]Expression #1 of ORDER BY clause is not in SELECT list, references column '' which is
# not in SELECT list; this is incompatible with
'ordering.tests.OrderingTests.test_orders_nulls_first_on_filtered_subquery',
# You have an error in your SQL syntax
'schema.tests.SchemaTests.test_func_index_cast',
'schema.tests.SchemaTests.test_add_field_binary',
'schema.tests.SchemaTests.test_add_textfield_default_nullable',
'schema.tests.SchemaTests.test_add_textfield_unhashable_default',
# Unsupported modify column: this column has primary key flag
'schema.tests.SchemaTests.test_alter_auto_field_to_char_field',
# Unsupported modify column: can't remove auto_increment without @@tidb_allow_remove_auto_inc enabled
'schema.tests.SchemaTests.test_alter_auto_field_to_integer_field',
# 'Unsupported modify column: this column has primary key flag
'schema.tests.SchemaTests.test_alter_autofield_pk_to_smallautofield_pk_sequence_owner',
# Found wrong number (0) of check constraints for schema_author.height
'schema.tests.SchemaTests.test_alter_field_default_dropped',
# Unsupported modify column: can't set auto_increment
'schema.tests.SchemaTests.test_alter_int_pk_to_autofield_pk',
'schema.tests.SchemaTests.test_alter_int_pk_to_bigautofield_pk',
# Unsupported drop primary key when the table's pkIsHandle is true
'schema.tests.SchemaTests.test_alter_int_pk_to_int_unique',
# Unsupported drop integer primary key
'schema.tests.SchemaTests.test_alter_not_unique_field_to_primary_key',
# Unsupported modify column: can't set auto_increment
'schema.tests.SchemaTests.test_alter_smallint_pk_to_smallautofield_pk',
# BLOB/TEXT/JSON column 'address' can't have a default value
'schema.tests.SchemaTests.test_alter_text_field_to_not_null_with_default_value',
# Unsupported modify column: this column has primary key flag
'schema.tests.SchemaTests.test_char_field_pk_to_auto_field',
# Unsupported modify charset from utf8mb4 to utf8
'schema.tests.SchemaTests.test_ci_cs_db_collation',
# Unsupported drop integer primary key
'schema.tests.SchemaTests.test_primary_key',
# wrong result
'schema.tests.SchemaTests.test_alter_pk_with_self_referential_field',
'schema.tests.SchemaTests.test_db_table',
'schema.tests.SchemaTests.test_indexes',
'schema.tests.SchemaTests.test_inline_fk',
'schema.tests.SchemaTests.test_remove_constraints_capital_letters',
'schema.tests.SchemaTests.test_remove_db_index_doesnt_remove_custom_indexes',
'schema.tests.SchemaTests.test_rename_column_renames_deferred_sql_references',
'schema.tests.SchemaTests.test_rename_referenced_field',
'schema.tests.SchemaTests.test_rename_table_renames_deferred_sql_references',
'schema.tests.SchemaTests.test_add_field_remove_field',
# Unknown column 'annotations_publisher.id' in 'where clause'
'annotations.tests.NonAggregateAnnotationTestCase.test_annotation_filter_with_subquery',
# Duplicate entry 'admin' for key 'username'
'auth_tests.test_admin_multidb.MultiDatabaseTests.test_add_view',
# Duplicate entry 'app_b-examplemodelb' for key 'django_content_type_app_label_model_76bd3d3b_uniq'
'auth_tests.test_models.LoadDataWithNaturalKeysAndMultipleDatabasesTestCase'
'.test_load_data_with_user_permissions',
'auth_tests.test_views.ChangelistTests.test_view_user_password_is_readonly',
'auth_tests.test_migrations.MultiDBProxyModelAppLabelTests',
'auth_tests.test_management.GetDefaultUsernameTestCase.test_with_database',
'backends.base.test_base.ExecuteWrapperTests.test_nested_wrapper_invoked',
'backends.base.test_base.ExecuteWrapperTests.test_outer_wrapper_blocks',
'backends.tests.FkConstraintsTests.test_check_constraints',
'backends.tests.FkConstraintsTests.test_check_constraints_sql_keywords',
# ignore multi database
'contenttypes_tests.test_models.ContentTypesMultidbTests.test_multidb',
# ContentType matching query does not exist.
'contenttypes_tests.test_models.ContentTypesTests.test_app_labeled_name',
# IntegrityError not raised
'constraints.tests.CheckConstraintTests.test_database_constraint',
'constraints.tests.CheckConstraintTests.test_database_constraint_expression',
'constraints.tests.CheckConstraintTests.test_database_constraint_expressionwrapper',
'constraints.tests.CheckConstraintTests.test_database_constraint_unicode',
# Cannot assign "<Book: Book object (90)>": the current database router prevents this relation.
'prefetch_related.tests.MultiDbTests.test_using_is_honored_custom_qs',
# django.http.response.Http404: No Article matches the given query.
'get_object_or_404.tests.GetObjectOr404Tests.test_get_object_or_404',
# django.db.transaction.TransactionManagementError: An error occurred in the current transaction.
# You can't execute queries until the end of the 'atomic' block.
'get_or_create.tests.UpdateOrCreateTests.test_integrity',
'get_or_create.tests.UpdateOrCreateTests.test_manual_primary_key_test',
'get_or_create.tests.UpdateOrCreateTestsWithManualPKs.test_create_with_duplicate_primary_key',
'db_functions.text.test_chr.ChrTests.test_non_ascii',
'db_functions.text.test_sha224.SHA224Tests.test_basic',
'db_functions.text.test_sha224.SHA224Tests.test_transform',
'db_functions.text.test_sha256.SHA256Tests.test_basic',
'db_functions.text.test_sha256.SHA256Tests.test_transform',
'db_functions.text.test_sha384.SHA384Tests.test_basic',
'db_functions.text.test_sha384.SHA384Tests.test_transform',
'db_functions.text.test_sha512.SHA512Tests.test_basic',
'db_functions.text.test_sha512.SHA512Tests.test_transform',
'db_functions.comparison.test_greatest.GreatestTests.test_basic',
'db_functions.comparison.test_least.LeastTests.test_basic',
'db_functions.datetime.test_extract_trunc.DateFunctionTests.test_trunc_time_func',
'migrations.test_commands.MigrateTests.test_migrate_fake_initial_case_insensitive',
'migrations.test_commands.MigrateTests.test_migrate_fake_split_initial',
'migrations.test_commands.MigrateTests.test_migrate_plan',
'migrations.test_executor.ExecutorTests.test_alter_id_type_with_fk',
'migrations.test_operations.OperationTests.test_add_binaryfield',
'migrations.test_operations.OperationTests.test_add_textfield',
'migrations.test_operations.OperationTests.test_alter_field_pk',
'migrations.test_operations.OperationTests.test_alter_field_reloads_state_on_fk_target_changes',
'migrations.test_operations.OperationTests.test_autofield__bigautofield_foreignfield_growth',
'migrations.test_operations.OperationTests.test_rename_field_reloads_state_on_fk_target_changes',
'migrations.test_operations.OperationTests.test_smallfield_autofield_foreignfield_growth',
'migrations.test_operations.OperationTests.test_smallfield_bigautofield_foreignfield_growth',
'migrations.test_loader.RecorderTests.test_apply',
'migrations.test_commands.MigrateTests.test_migrate_fake_initial',
'migrations.test_commands.MigrateTests.test_migrate_initial_false',
'migrations.test_commands.MigrateTests.test_migrate_syncdb_app_label',
'migrations.test_commands.MigrateTests.test_migrate_syncdb_deferred_sql_executed_with_schemaeditor',
'migrations.test_operations.OperationTests.test_add_constraint',
'migrations.test_operations.OperationTests.test_add_constraint_combinable',
'migrations.test_operations.OperationTests.test_add_constraint_percent_escaping',
'migrations.test_operations.OperationTests.test_add_or_constraint',
'migrations.test_operations.OperationTests.test_create_model_with_constraint',
'migrations.test_operations.OperationTests.test_remove_constraint',
# An error occurred in the current transaction. You can't execute queries until the end of the
# 'atomic' block." not found in 'Save with update_fields did not affect any rows.
'basic.tests.SelectOnSaveTests.test_select_on_save_lying_update',
'admin_views.test_multidb.MultiDatabaseTests.test_add_view',
'admin_views.test_multidb.MultiDatabaseTests.test_change_view',
'admin_views.test_multidb.MultiDatabaseTests.test_delete_view',
'admin_views.test_autocomplete_view.AutocompleteJsonViewTests.test_to_field_resolution_with_fk_pk',
'admin_views.test_autocomplete_view.AutocompleteJsonViewTests.test_to_field_resolution_with_mti',
'admin_views.tests.AdminSearchTest.test_exact_matches',
'admin_views.tests.AdminSearchTest.test_no_total_count',
'admin_views.tests.AdminSearchTest.test_search_on_sibling_models',
'admin_views.tests.GroupAdminTest.test_group_permission_performance',
'admin_views.tests.UserAdminTest.test_user_permission_performance',
'multiple_database.tests.AuthTestCase.test_dumpdata',
# about Pessimistic/Optimistic Transaction Model
'select_for_update.tests.SelectForUpdateTests.test_raw_lock_not_available',
}
}
if self.connection.tidb_version == (5, 0, 3):
skips.update({
"tidb503": {
'expressions_window.tests.WindowFunctionTests.test_subquery_row_range_rank',
'schema.tests.SchemaTests.test_alter_textual_field_keep_null_status',
# Unsupported modify column: column type conversion
# between 'varchar' and 'non-varchar' is currently unsupported yet
'schema.tests.SchemaTests.test_alter',
'schema.tests.SchemaTests.test_alter_field_type_and_db_collation',
'schema.tests.SchemaTests.test_alter_textual_field_keep_null_status',
}
})
if self.connection.tidb_version == (4, 0, 0):
skips.update({
"tidb400": {
'admin_filters.tests.ListFiltersTests.test_relatedfieldlistfilter_reverse_relationships',
'admin_filters.tests.ListFiltersTests.test_emptylistfieldfilter_reverse_relationships',
'aggregation.test_filter_argument.FilteredAggregateTests.test_filtered_numerical_aggregates',
'aggregation_regress.tests.AggregationTests.test_stddev',
'aggregation_regress.tests.AggregationTests.test_aggregate_fexpr',
'annotations.tests.NonAggregateAnnotationTestCase.test_raw_sql_with_inherited_field',
'auth_tests.test_models.UserWithPermTestCase.test_basic',
'generic_relations_regress.tests.GenericRelationTests.test_ticket_20378',
'queries.test_bulk_update.BulkUpdateNoteTests.test_functions',
'queries.tests.TestTicket24605.test_ticket_24605',
'queries.tests.Queries6Tests.test_tickets_8921_9188',
'schema.tests.SchemaTests.test_add_field_default_nullable'
}
})
if self.connection.tidb_version < (5,):
skips.update({
"tidb4": {
# Unsupported modify column
'schema.tests.SchemaTests.test_rename',
'schema.tests.SchemaTests.test_m2m_rename_field_in_target_model',
'schema.tests.SchemaTests.test_alter_textual_field_keep_null_status',
'schema.tests.SchemaTests.test_alter_text_field_to_time_field',
'schema.tests.SchemaTests.test_alter_text_field_to_datetime_field',
'schema.tests.SchemaTests.test_alter_text_field_to_date_field',
'schema.tests.SchemaTests.test_alter_field_type_and_db_collation',
# wrong result
'expressions_window.tests.WindowFunctionTests.test_subquery_row_range_rank',
'migrations.test_operations.OperationTests.test_alter_fk_non_fk',
'migrations.test_operations.OperationTests'
'.test_alter_field_reloads_state_on_fk_with_to_field_target_changes',
'model_fields.test_integerfield.PositiveIntegerFieldTests.test_negative_values',
}
})
if self.connection.tidb_version >= (4, 0, 5) and self.connection.tidb_version <= (4, 0, 9):
skips['tidb4'].add('lookup.tests.LookupTests.test_regex')
return skips
@cached_property
def update_can_self_select(self):
return True
@cached_property
def can_introspect_foreign_keys(self):
return False
@cached_property
def can_return_columns_from_insert(self):
return False
can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert'))
@cached_property
def has_zoneinfo_database(self):
return self.connection.tidb_server_data['has_zoneinfo_database']
@cached_property
def is_sql_auto_is_null_enabled(self):
return self.connection.tidb_server_data['sql_auto_is_null']
@cached_property
def supports_over_clause(self):
return True
supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause'))
@cached_property
def supports_column_check_constraints(self):
return True
supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints'))
@cached_property
def can_introspect_check_constraints(self):
return False
@cached_property
def has_select_for_update_skip_locked(self):
return False
@cached_property
def has_select_for_update_nowait(self):
return False
@cached_property
def has_select_for_update_of(self):
return False
@cached_property
def supports_explain_analyze(self):
return True
@cached_property
def supported_explain_formats(self):
return {'DOT', 'ROW', 'BRIEF'}
@cached_property
def ignores_table_name_case(self):
return self.connection.tidb_server_data['lower_case_table_names']
@cached_property
def supports_default_in_lead_lag(self):
return True
@cached_property
def supports_json_field(self):
return False
@cached_property
def can_introspect_json_field(self):
return self.supports_json_field and self.can_introspect_check_constraints
@cached_property
def supports_index_column_ordering(self):
return False
@cached_property
def supports_expression_indexes(self):
return self.connection.tidb_version >= (5, 1, )
| 55.286396 | 118 | 0.699763 | 22,492 | 0.970948 | 0 | 0 | 21,442 | 0.925621 | 0 | 0 | 15,536 | 0.670667 |
72404d3d39210b175e825c5b94b9e21a7e2698f1 | 421 | py | Python | src/combine_npy.py | hongli-ma/RNANetMotif | 34b4de443ec7edb59f4e4e06b17686543c438366 | [
"MIT"
]
| null | null | null | src/combine_npy.py | hongli-ma/RNANetMotif | 34b4de443ec7edb59f4e4e06b17686543c438366 | [
"MIT"
]
| null | null | null | src/combine_npy.py | hongli-ma/RNANetMotif | 34b4de443ec7edb59f4e4e06b17686543c438366 | [
"MIT"
]
| null | null | null | import numpy as np
import sys
import glob
# The RBP name and k-mer size are taken from the command line.
rbp = sys.argv[1]
kmer = sys.argv[2]
# Collect the per-run positive .npy matrices for this RBP/k-mer combination.
pfile_list = glob.glob("result_VDM3_" + rbp + "_positive_" + kmer + "_*.npy")
pfile1 = np.load(pfile_list[0])
psha = np.shape(pfile1)
pmatrix = np.zeros(psha)
# Sum the matrices element-wise into a single combined matrix.
for pfile in pfile_list:
    chunk = np.load(pfile)
    # chunk = np.fromfile(pfile, dtype=np.float32)
    pmatrix += chunk
np.save("positive_" + rbp + "_vdm3_nopaircontrol_distance_matrix_" + kmer + "mer.npy", pmatrix)
| 23.388889 | 86 | 0.750594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.31829 |
7241a2c99b3dfd4732a6af0ad6cf19b2e1c6a517 | 1,238 | py | Python | fgvcdata/__init__.py | catalys1/fgvc-data-pytorch | e2666d011c71308c4975776fbc41e947424f0723 | [
"MIT"
]
| 4 | 2020-07-05T10:19:20.000Z | 2021-09-15T08:22:36.000Z | fgvcdata/__init__.py | catalys1/fgvc-data-pytorch | e2666d011c71308c4975776fbc41e947424f0723 | [
"MIT"
]
| 1 | 2020-11-13T22:01:47.000Z | 2020-11-13T22:01:47.000Z | fgvcdata/__init__.py | catalys1/fgvc-data-pytorch | e2666d011c71308c4975776fbc41e947424f0723 | [
"MIT"
]
| null | null | null | '''A common interface to FGVC datasets.
Currently supported datasets are
- CUB Birds
- CUB Birds with expert labels
- NA Birds
- Stanford Cars
- Stanford Dogs
- Oxford Flowers
- Oxford FGVC Aircraft
- Tsinghua Dogs
Datasets are constructed and used following the pytorch
data.utils.data.Dataset paradigm, and have the signature
fgvcdata.Dataset(root='path/to/data/'[,transform[,target_transform[,train]]])
`root` is the path to the base folder for the dataset. Additionally, `root` can
end in `/train` or `/test`, to indicate whether to use train or test data --
even if the root folder does not contain `train` or `test` subfolders.
The use of training or test data can also be specified through the use of the
`train` flag (the path extension on `root` takes precedence).
`transform` and `target_transform` are optional callables that preprocess data
and targets respectively. It is common to use the torchvision.transforms
module for this.
'''
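# Illustrative usage (a sketch only; the concrete dataset class name, e.g. `CUB`, is an
# assumption, so check the submodules imported below for the names they actually export):
#
#     from torchvision import transforms
#     import fgvcdata
#
#     tfm = transforms.Compose([
#         transforms.Resize((256, 256)),
#         transforms.ToTensor(),
#         transforms.Normalize(*fgvcdata.IMAGENET_STATS),
#     ])
#     train_data = fgvcdata.CUB(root='/path/to/CUB/train', transform=tfm)
#     image, label = train_data[0]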
from .birds import *
from .cars import *
from .dogs import *
from .aircraft import *
from .flowers import *
from .icub import *
IMAGENET_STATS = ((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
datasets = []
for f in [birds,icub,cars,dogs,aircraft,flowers]:
datasets += f.__all__
| 29.47619 | 79 | 0.747981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 952 | 0.768982 |
72422e4892bc1b2767ffe8812f2e9e5e44e84b64 | 10,491 | py | Python | app/auth/api.py | Anti-Counter021/Anti-YouTube-back-end | eca9b26b4a1feb7e516c0164e86c5d6444af8db5 | [
"MIT"
]
| null | null | null | app/auth/api.py | Anti-Counter021/Anti-YouTube-back-end | eca9b26b4a1feb7e516c0164e86c5d6444af8db5 | [
"MIT"
]
| null | null | null | app/auth/api.py | Anti-Counter021/Anti-YouTube-back-end | eca9b26b4a1feb7e516c0164e86c5d6444af8db5 | [
"MIT"
]
| null | null | null | from typing import List
from celery.result import AsyncResult
from fastapi import APIRouter, status, Depends, Form, UploadFile, File, Request, WebSocket
from fastapi.responses import RedirectResponse
from app.auth import service
from app.auth.models import User
from app.auth.permission import is_active
from app.auth.schemas import (
RegisterUser,
VerificationUUID,
Tokens,
RefreshToken,
AccessToken,
Password,
ChangeUserDataResponse,
ChangeUserData,
Channel,
ChangePassword,
Tasks,
)
from app.config import oauth
from app.db import async_session
from app.schemas import Message
from app.videos.schemas import GetVideo, SubscriptionsVideos
auth_router = APIRouter()
@auth_router.post(
'/refresh',
status_code=status.HTTP_200_OK,
response_model=AccessToken,
description='Refresh token',
response_description='Access token',
name='Refresh token',
)
async def refresh(schema: RefreshToken):
async with async_session() as session:
async with session.begin():
return await service.refresh(session, schema)
@auth_router.post(
'/register',
status_code=status.HTTP_201_CREATED,
response_model=Message,
description='Registration new user',
response_description='User has been registration',
name='Registration',
)
async def register(schema: RegisterUser):
async with async_session() as session:
async with session.begin():
return await service.register(session, schema)
@auth_router.post(
'/activate',
status_code=status.HTTP_200_OK,
response_model=Message,
description='Activation account',
response_description='Account activated',
name='Activation account',
)
async def activate(schema: VerificationUUID):
async with async_session() as session:
async with session.begin():
return await service.activate(session, schema)
@auth_router.post(
'/login',
response_model=Tokens,
status_code=status.HTTP_200_OK,
description='Login',
response_description='Tokens',
name='Login',
)
async def login(username: str = Form(...), password: str = Form(...)):
async with async_session() as session:
async with session.begin():
return await service.login(session, username, password)
@auth_router.post(
'/follow',
response_model=Message,
status_code=status.HTTP_200_OK,
description='Follow',
response_description='Follow',
name='Follow',
)
async def follow(to_id: int, user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.follow(session, to_id, user)
@auth_router.post(
'/unfollow',
response_model=Message,
status_code=status.HTTP_200_OK,
description='Unfollow',
response_description='Unfollow',
name='Unfollow',
)
async def unfollow(to_id: int, user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.unfollow(session, to_id, user)
@auth_router.get(
'/request-password-reset',
response_model=Message,
status_code=status.HTTP_200_OK,
description='Request reset password',
response_description='Request reset password',
name='Request reset password',
)
async def create_reset_password(email: str):
async with async_session() as session:
async with session.begin():
return await service.create_reset_password(session, email)
@auth_router.post(
'/password-reset',
response_model=Message,
status_code=status.HTTP_200_OK,
description='Reset password',
response_description='Reset password',
name='Reset password',
)
async def verify_password_reset(token: str, schema: Password):
async with async_session() as session:
async with session.begin():
return await service.verify_password_reset(session, token, schema)
@auth_router.post(
'/username',
response_model=Message,
status_code=status.HTTP_200_OK,
description='Get username from email',
response_description='Get username from email',
name='Get username',
)
async def get_username(email: str):
async with async_session() as session:
async with session.begin():
return await service.get_username(session, email)
@auth_router.get(
'/change-data',
response_model=ChangeUserDataResponse,
status_code=status.HTTP_200_OK,
description='Get user data',
response_description='Get user data',
name='Get user data',
)
async def get_data(user: User = Depends(is_active)):
return await service.get_data(user)
@auth_router.put(
'/change-data',
response_model=ChangeUserDataResponse,
status_code=status.HTTP_200_OK,
description='Change user data',
response_description='Change user data',
name='Change user data',
)
async def change_data(schema: ChangeUserData, user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.change_data(session, schema, user)
@auth_router.post(
'/avatar',
response_model=ChangeUserDataResponse,
status_code=status.HTTP_200_OK,
description='Upload avatar',
response_description='Upload avatar',
name='Upload avatar',
)
async def upload_avatar(avatar: UploadFile = File(...), user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.upload_avatar(session, avatar, user)
@auth_router.get(
'/history',
response_model=List[GetVideo],
status_code=status.HTTP_200_OK,
description='Get history',
response_description='Get history',
name='History',
)
async def get_history(user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.get_history(session, user)
@auth_router.get(
'/channel',
response_model=Channel,
status_code=status.HTTP_200_OK,
description='Get channel',
response_description='Get channel',
name='Get channel',
)
async def get_channel(pk: int, request: Request):
async with async_session() as session:
async with session.begin():
return await service.get_channel(session, pk, request)
@auth_router.get(
'/channel/videos/{pk}',
response_model=List[GetVideo],
status_code=status.HTTP_200_OK,
description='Get channel videos',
response_description='Get channel videos',
name='Get channel videos',
)
async def get_channel_videos(pk: int):
async with async_session() as session:
async with session.begin():
return await service.get_channel_videos(session, pk)
@auth_router.get(
'/followed',
response_model=List[SubscriptionsVideos],
status_code=status.HTTP_200_OK,
description='Subscriptions',
response_description='Subscriptions',
name='Subscriptions',
)
async def subscriptions(user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.subscriptions(session, user)
@auth_router.put(
'/change-password',
response_model=Message,
status_code=status.HTTP_200_OK,
description='Change password',
response_description='Change password',
name='Change password',
)
async def change_password(schema: ChangePassword, user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.change_password(session, schema, user)
@auth_router.post(
'/2-auth',
response_model=Tokens,
status_code=status.HTTP_200_OK,
description='2-step login',
response_description='2-step login',
name='2-step login',
)
async def two_auth(username: str = Form(...), password: str = Form(...), code: str = Form(...)):
async with async_session() as session:
async with session.begin():
return await service.two_auth(session, username, password, code)
@auth_router.get(
'/2-auth-toggle',
response_model=Message,
status_code=status.HTTP_200_OK,
description='On/off 2-step',
response_description='On/off 2-step',
name='On/off 2-step',
)
async def toggle_2step_auth(user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.toggle_2step_auth(session, user)
@auth_router.get(
'/google-login',
description='Google login',
response_description='Google login',
name='Google login',
status_code=status.HTTP_200_OK,
)
async def google_login(request: Request):
redirect_uri = 'http://localhost:8000/api/v1/auth/google-auth'
return await oauth.google.authorize_redirect(request, redirect_uri)
@auth_router.get(
'/google-auth',
response_class=RedirectResponse,
description='Google auth',
response_description='Google auth',
name='Google auth',
status_code=status.HTTP_200_OK,
)
async def google_auth(request: Request):
async with async_session() as session:
async with session.begin():
token = await oauth.google.authorize_access_token(request)
user = await oauth.google.parse_id_token(request, token)
return await service.google_auth(session, user)
@auth_router.post(
'/export',
response_model=Tasks,
status_code=status.HTTP_200_OK,
description='Export data',
response_description='Export data',
name='Export data',
)
async def export(user: User = Depends(is_active)):
async with async_session() as session:
async with session.begin():
return await service.export(session, user)
@auth_router.websocket('/task-status')
async def task_status(websocket: WebSocket):
await websocket.accept()
while True:
task_id = await websocket.receive_text()
task = AsyncResult(task_id)
if task.status == 'PENDING':
response = {
'state': task.state,
'progress': 0,
}
await websocket.send_json(response)
else:
response = {
'state': task.state,
'progress': task.info.get('progress', 0),
}
await websocket.send_json(response)
if task.info.get('progress') == 100:
break
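# Illustrative client-side sketch (not part of this API module): polling the task-status
# websocket above until an export task reports completion. The '/api/v1/auth' prefix is an
# assumption based on the Google login redirect URI used earlier in this file.
#
#     import asyncio, json
#     import websockets  # hypothetical client dependency
#
#     async def wait_for_task(task_id: str) -> dict:
#         async with websockets.connect('ws://localhost:8000/api/v1/auth/task-status') as ws:
#             while True:
#                 await ws.send(task_id)
#                 data = json.loads(await ws.recv())
#                 if data.get('progress') == 100:
#                     return data
#
#     # asyncio.run(wait_for_task('celery-task-id'))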
| 29.304469 | 96 | 0.691354 | 0 | 0 | 0 | 0 | 9,708 | 0.925365 | 5,090 | 0.485178 | 1,417 | 0.135068 |
7242536c3707c16822eadee50c71c7b05cdd3796 | 7,768 | py | Python | concourse/steps/scan_container_images.py | jia-jerry/cc-utils | 01322d2acb7343c92138dcf0b6ac913b276525bc | [
"Apache-2.0"
]
| null | null | null | concourse/steps/scan_container_images.py | jia-jerry/cc-utils | 01322d2acb7343c92138dcf0b6ac913b276525bc | [
"Apache-2.0"
]
| null | null | null | concourse/steps/scan_container_images.py | jia-jerry/cc-utils | 01322d2acb7343c92138dcf0b6ac913b276525bc | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import textwrap
import typing
import tabulate
import clamav.util
import mailutil
from concourse.model.traits.image_scan import Notify
from product.model import ComponentName, UploadResult
class MailRecipients(object):
def __init__(
self,
root_component_name: str,
protecode_cfg,
protecode_group_id: int,
protecode_group_url: str,
cfg_set,
result_filter=None,
recipients: typing.List[str]=[],
recipients_component: ComponentName=None,
):
self._root_component_name = root_component_name
self._result_filter = result_filter
self._protecode_results = []
self._clamav_results = []
self._cfg_set = cfg_set
if not bool(recipients) ^ bool(recipients_component):
            raise ValueError('exactly one of recipients, recipients_component must be given')
        self._recipients = recipients
        self._recipients_component = recipients_component
self._protecode_cfg = protecode_cfg
self._protecode_group_id = protecode_group_id
self._protecode_group_url = protecode_group_url
@functools.lru_cache()
def resolve_recipients(self):
if not self._recipients_component:
return self._recipients
# XXX it should not be necessary to pass github_cfg
return mailutil.determine_mail_recipients(
github_cfg_name=self._cfg_set.github().name(),
component_names=(self._recipients_component.name(),),
)
def add_protecode_results(self, results: typing.Iterable[typing.Tuple[UploadResult, int]]):
print(f'adding protecode results for {self}')
for result in results:
if self._result_filter:
if not self._result_filter(component=result[0].component):
print(f'did not match: {result[0].component.name()}')
continue
self._protecode_results.append(result)
def add_clamav_results(self, results):
for result in results:
self._clamav_results.append(result)
def has_results(self):
if self._protecode_results:
return True
if self._clamav_results:
return True
def mail_body(self):
parts = []
parts.append(self._mail_disclaimer())
parts.append(protecode_results_table(
protecode_cfg=self._protecode_cfg,
upload_results=self._protecode_results,
)
)
parts.append(self._clamav_report())
return ''.join(parts)
def _mail_disclaimer(self):
return textwrap.dedent(f'''
<div>
<p>
Note: you receive this E-Mail, because you were configured as a mail recipient
in repository "{self._root_component_name}" (see .ci/pipeline_definitions)
To remove yourself, search for your e-mail address in said file and remove it.
</p>
<p>
The following components in Protecode-group
<a href="{self._protecode_group_url}">{self._protecode_group_id}</a>
were found to contain critical vulnerabilities:
</p>
</div>
''')
def _clamav_report(self):
if not self._clamav_results:
return textwrap.dedent(f'''
<p>Scanned all container image(s) for matching virus signatures
without any matches (id est: all container images seem to be free of known malware)
''')
result = '<p><div>Virus Scanning Results</div>'
return result + tabulate.tabulate(
self._clamav_results,
headers=('Image-Reference', 'Scanning Result'),
tablefmt='html',
)
def __repr__(self):
if self._recipients_component:
descr = f'component {self._recipients_component.name()}'
else:
descr = 'for all results'
return 'MailRecipients: ' + descr
def mail_recipients(
notification_policy: Notify,
root_component_name:str,
protecode_cfg,
protecode_group_id: int,
protecode_group_url: str,
cfg_set,
email_recipients: typing.Iterable[str]=(),
components: typing.Iterable[ComponentName]=(),
):
mail_recps_ctor = functools.partial(
MailRecipients,
root_component_name=root_component_name,
protecode_cfg=protecode_cfg,
protecode_group_id=protecode_group_id,
protecode_group_url=protecode_group_url,
cfg_set=cfg_set,
)
notification_policy = Notify(notification_policy)
if notification_policy == Notify.EMAIL_RECIPIENTS:
if not email_recipients:
raise ValueError('at least one email_recipient must be specified')
# exactly one MailRecipients, catching all (hence no filter)
yield mail_recps_ctor(
recipients=email_recipients,
)
elif notification_policy == Notify.NOBODY:
return
elif notification_policy == Notify.COMPONENT_OWNERS:
def make_comp_filter(own_component):
def comp_filter(component):
print(f'filter: component: {own_component.name()} - other: {component.name()}')
return own_component.name() == component.name() # only care about matching results
return comp_filter
for comp in components:
yield mail_recps_ctor(
recipients_component=comp,
result_filter=make_comp_filter(own_component=comp)
)
else:
raise NotImplementedError()
def virus_scan_images(image_references: typing.Iterable[str]):
for image_reference in image_references:
status, signature = clamav.util.scan_container_image(image_reference=image_reference)
if clamav.util.result_ok(status=status, signature=signature):
continue
yield (image_reference, f'{status}: {signature}')
def protecode_results_table(protecode_cfg, upload_results: typing.Iterable[UploadResult]):
def result_to_tuple(upload_result: UploadResult):
# upload_result tuple of product.model.UploadResult and CVE Score
upload_result, greatest_cve = upload_result
# protecode.model.AnalysisResult
analysis_result = upload_result.result
name = analysis_result.display_name()
analysis_url = \
f'{protecode_cfg.api_url()}/products/{analysis_result.product_id()}/#/analysis'
link_to_analysis_url = f'<a href="{analysis_url}">{name}</a>'
custom_data = analysis_result.custom_data()
if custom_data is not None:
image_reference = custom_data.get('IMAGE_REFERENCE')
else:
image_reference = None
return [link_to_analysis_url, greatest_cve, image_reference]
table = tabulate.tabulate(
map(result_to_tuple, upload_results),
headers=('Component Name', 'Greatest CVE', 'Container Image Reference'),
tablefmt='html',
)
return table
| 36.299065 | 99 | 0.660788 | 3,806 | 0.489959 | 1,944 | 0.250257 | 382 | 0.049176 | 0 | 0 | 2,415 | 0.310891 |
72430bcb51d12558e07e88c7e1a6d221c05d6f85 | 647 | py | Python | py/cv/video.py | YodaEmbedding/experiments | 567c6a1c18fac2d951fe2af54aaa4917b7d529d2 | [
"MIT"
]
| null | null | null | py/cv/video.py | YodaEmbedding/experiments | 567c6a1c18fac2d951fe2af54aaa4917b7d529d2 | [
"MIT"
]
| null | null | null | py/cv/video.py | YodaEmbedding/experiments | 567c6a1c18fac2d951fe2af54aaa4917b7d529d2 | [
"MIT"
]
| null | null | null | import cv2
import numpy as np
height = 500
width = 700
gray = np.zeros((height, width), dtype=np.uint8)
# fourcc = cv2.VideoWriter_fourcc(*"MJPG")
# filename = "output.avi"
fourcc = cv2.VideoWriter_fourcc(*"MP4V")
filename = "output.mp4"
writer = cv2.VideoWriter(
filename, fourcc, fps=30, frameSize=(width, height), isColor=False
)
# NOTE isColor doesn't seem to influence resulting file size
xs = np.arange(width // 10)
ys = np.arange(height // 10)
locations = np.dstack(np.meshgrid(ys, xs)).reshape(-1, 2)
for y, x in locations:
gray[y, x] = 255
# gray_3c = cv2.merge([gray, gray, gray])
writer.write(gray)
writer.release()
| 24.884615 | 70 | 0.689335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.287481 |
72450375a565716f2e2d7e0a06b152a00332048e | 1,062 | py | Python | po/loginpage.py | imzengyang/datadrivertestexample | a37520c2f5f0ae6dfbcaaa371586ba7e98540537 | [
"MIT"
]
| 1 | 2018-06-03T05:31:46.000Z | 2018-06-03T05:31:46.000Z | po/loginpage.py | imzengyang/datadrivertestexample | a37520c2f5f0ae6dfbcaaa371586ba7e98540537 | [
"MIT"
]
| null | null | null | po/loginpage.py | imzengyang/datadrivertestexample | a37520c2f5f0ae6dfbcaaa371586ba7e98540537 | [
"MIT"
]
| null | null | null |
from po.base import BasePage
from po.base import InvalidPageException
class LoginPage(BasePage):
_login_view_locator = ".active"
_login_name_selector = "#name"
_login_passwd_selector = "#pass"
_login_btn_selector = ".span-primary"
    _login_error_msg_selector = '.alert strong'
def __init__(self, driver):
super(LoginPage, self).__init__(driver)
    def user_login(self, username, passwd):
self.driver.find_element_by_css_selector(self._login_name_selector).send_keys(username)
self.driver.find_element_by_css_selector(self._login_passwd_selector).send_keys(passwd)
self.driver.find_element_by_css_selector(self._login_btn_selector).click()
@property
def error_msg(self):
return self.driver.find_element_by_css_selector(self._login_error_msg_selector).text.strip()
# def _validate_page(self, driver):
# try:
# driver.find_element_by_css_selector(self._product_view_locator).text
# except:
# raise InvalidPageException('login page not loaded') | 33.1875 | 100 | 0.733522 | 990 | 0.932203 | 0 | 0 | 135 | 0.127119 | 0 | 0 | 251 | 0.236347 |
724561c601c848d5d6d0e629507abb99ee03ff0a | 1,329 | py | Python | app.py | jesseokeya/linkedin-scraper | 6b9d5af5167c8c936db63a855a9885728efbfeb5 | [
"MIT"
]
| null | null | null | app.py | jesseokeya/linkedin-scraper | 6b9d5af5167c8c936db63a855a9885728efbfeb5 | [
"MIT"
]
| 1 | 2020-01-04T19:33:58.000Z | 2021-09-07T15:03:03.000Z | app.py | jesseokeya/linkedin-scraper | 6b9d5af5167c8c936db63a855a9885728efbfeb5 | [
"MIT"
]
| 1 | 2021-12-02T06:51:46.000Z | 2021-12-02T06:51:46.000Z | from lib import Scrape
from typing import List
from os import environ
def main():
seconds: int = 60
username: str = environ.get('EMAIL')
password: str = environ.get('PASSWORD')
# Navigates to Linkedin's website
scraper = Scrape()
    # Takes in credentials to log in to the specified url
scraper.login(username=username, password=password)
# Navigate to specified pages on the website
scraper.navigate_to('profile', duration=2)
scraper.navigate_to(
multiple=['notifications', 'messages', 'network', 'jobs', 'home'],
duration=2
)
# Scroll to the bottom of page for 10 seconds
# The longer you scroll the more data you collect from linkedin
scraper.scroll_to_bottom(10)
# Returns a list of all images on website
images: List[str] = scraper.retrieve_images()
# Returns a list of all videos on website
videos: List[str] = scraper.retrieve_videos()
    # Build the scraped data into a dict
    file_data: dict = {
'images': images,
'videos': videos
}
# print scrapped information before saving to file
print(file_data)
# create and write file data to json file
scraper.write_file(file_data, 'data.json')
# Uncomment to end the selenium chrome driver after 60 seconds
# scraper.end(seconds)
main()
| 27.122449 | 74 | 0.678706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.472536 |
724593364a3fe88c699961a3b8ddb8f17f617e15 | 100 | py | Python | Loops/for_in.py | 1302580MK/Udemy_Python | c7aef0645ae15a954c2356ba96288deaa087fb32 | [
"MIT"
]
| null | null | null | Loops/for_in.py | 1302580MK/Udemy_Python | c7aef0645ae15a954c2356ba96288deaa087fb32 | [
"MIT"
]
| null | null | null | Loops/for_in.py | 1302580MK/Udemy_Python | c7aef0645ae15a954c2356ba96288deaa087fb32 | [
"MIT"
]
| null | null | null | var1 = "hello world"
# the name on the left is bound, one at a time, to each item taken from the iterable on the right
for character in var1:
print(character) | 20 | 34 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.47 |
7247650bb946b4cd8155dc4709b0e70976c42ea4 | 546 | py | Python | forum/migrations/0003_auto_20190307_1825.py | AdityaJ42/DJ-Comps-Book-Exchange | 12bba45f016e1b708477c642c2595b7f15e3dcfc | [
"MIT"
]
| null | null | null | forum/migrations/0003_auto_20190307_1825.py | AdityaJ42/DJ-Comps-Book-Exchange | 12bba45f016e1b708477c642c2595b7f15e3dcfc | [
"MIT"
]
| null | null | null | forum/migrations/0003_auto_20190307_1825.py | AdityaJ42/DJ-Comps-Book-Exchange | 12bba45f016e1b708477c642c2595b7f15e3dcfc | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-03-07 12:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forum', '0002_upvote'),
]
operations = [
migrations.AlterField(
model_name='upvote',
name='comment',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='upvotes', to='forum.Comment'),
),
]
| 24.818182 | 125 | 0.64652 | 355 | 0.650183 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.241758 |
724a427f96e1eeeba039df150a14e4acaeb34725 | 495 | py | Python | datahub/event/migrations/0018_move_to_metadata_trade_agreement.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
]
| 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/event/migrations/0018_move_to_metadata_trade_agreement.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
]
| 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/event/migrations/0018_move_to_metadata_trade_agreement.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
]
| 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | # Generated by Django 3.1.7 on 2021-04-08 11:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0009_tradeagreement'),
('event', '0017_add_related_trade_agreement_fields'),
]
operations = [
migrations.AlterField(
model_name='event',
name='related_trade_agreements',
field=models.ManyToManyField(blank=True, to='metadata.TradeAgreement'),
),
]
| 24.75 | 83 | 0.638384 | 402 | 0.812121 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.371717 |
724a51915fb64beb06bbeb6fa5488524f3f99f3f | 16,905 | py | Python | pbs/scripts/update_burn_dataOLD.py | jawaidm/pbs | 87f5c535c976d6a5eccbfbbf2073589b6e366d04 | [
"Apache-2.0"
]
| null | null | null | pbs/scripts/update_burn_dataOLD.py | jawaidm/pbs | 87f5c535c976d6a5eccbfbbf2073589b6e366d04 | [
"Apache-2.0"
]
| 12 | 2019-10-22T23:16:38.000Z | 2022-03-11T23:17:45.000Z | pbs/scripts/update_burn_dataOLD.py | jawaidm/pbs | 87f5c535c976d6a5eccbfbbf2073589b6e366d04 | [
"Apache-2.0"
]
| 5 | 2019-12-19T06:18:42.000Z | 2022-01-07T01:16:18.000Z | ''' ----------------------------------------------------------------------------------------
This script will update the Prescribed Burn System's ePFP data according to txt input
files found in the relevant scripts folder.
It requires user input of the Corporate Executive Approval date, which it will then use
to set PREVIOUS_YEAR, TARGET_YEAR, DATE_APPROVED and DATE_APPROVED_TO variables used by
relevant functions in the script.
Sample output below:
Please enter the date that the Burn Program was approved by Corporate Executive (dd/mm/yyyy): 03/07/2019
Script will run with the following details:
- Previous Year: 2018/2019
- Target Year: 2019/2020
- Script Data Folder: pbs/scripts/eofy_data/2019
Do you wish to continue [y/n]? y
Updating financial year and setting planning status modified date for carry over currently approved ePFPs from 2018/2019.
Total prescriptions in query: 331
Financial year for ABC_123(2013/2014) is not 2018/2019 or already in 2019/2020.
Updated financial year and set planning status modified date for 330 carry over currently approved ePFPs
Applying corporate approval and setting planning status modified date for ePFPs currently seeking approval in 2019/2020.
Total prescriptions in query: 51
Applied corporate approval and set planning status modified date for 51 ePFPs that were seeking approval
Updating financial year only selected ePFPs from 2018/2019
Total prescriptions in query: 330
Financial year for ABC_123(2013/2014) is not 2018/2019.
Updated financial year only for 0 ePFPs
329 records already in 2019/2020
Updating priority for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated priority for 412 ePFPs (expected 412)
Updating area for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated area for 412 ePFPs (expected 412)
Updating perimeters for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated perimeter for 412 ePFPs (expected 412)
Updating overall rationale for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated rationale for 168 ePFPs (expected 168)
----------------------------------------------------------------------------------------
'''
import os
import sys
import confy
from django.db import transaction
from django.core.wsgi import get_wsgi_application
from decimal import Decimal
import csv
from datetime import datetime, date
import pytz
application = get_wsgi_application() # This is so models get loaded.
try:
confy.read_environment_file('.env')
except Exception:
    print('ERROR: Script must be run from PROJECT BASE_DIR')
exit()
proj_path = os.getcwd()
sys.path.append(proj_path)
os.chdir(proj_path)
# ----------------------------------------------------------------------------------------
# Script starts here
# ----------------------------------------------------------------------------------------
from pbs.prescription.models import Prescription
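# Illustrative sketch only: the globals used by the functions below (PREVIOUS_YEAR, TARGET_YEAR,
# DATE_APPROVED) are set from the Corporate Executive approval date entered at the interactive
# prompt elsewhere in this script; with the sample date 03/07/2019 the derivation is roughly
#
#     approved = datetime.strptime('03/07/2019', '%d/%m/%Y')
#     DATE_APPROVED = pytz.timezone('Australia/Perth').localize(approved)  # timezone is an assumption
#     TARGET_YEAR = '{}/{}'.format(approved.year, approved.year + 1)       # '2019/2020'
#     PREVIOUS_YEAR = '{}/{}'.format(approved.year - 1, approved.year)     # '2018/2019'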
def read_ids(filename):
    # First column of a comma-delimited input file, as a flat list of burn ids.
    return [i[0] for i in list(csv.reader(open(filename), delimiter=',', quotechar='"'))]
def read_ids_pipe_separated(filename):
    # As read_ids, but for pipe-delimited input files.
    return [i[0] for i in list(csv.reader(open(filename), delimiter='|', quotechar='"'))]
def read_id_tuples(filename):
    # Whole rows of a comma-delimited input file (e.g. burn id plus a value) as tuples.
    return [tuple(i) for i in list(csv.reader(open(filename), delimiter=',', quotechar='"'))]
def read_id_tuples_pipe_separated(filename):
    # As read_id_tuples, but for pipe-delimited input files.
    return [tuple(i) for i in list(csv.reader(open(filename), delimiter='|', quotechar='"'))]
def carryover_currently_approved(ids):
# Used to update the financial year of existing corporate approved ePFPs from previous year
# Also updates the date modified of the Planning Status (approval date)
print('\nUpdating financial year and setting planning status '
'modified date for carry over currently approved ePFPs from {}.'.format(PREVIOUS_YEAR))
count = 0
print('Total prescriptions in query: {}'.format(Prescription.objects.filter(burn_id__in=ids).count()))
for p in Prescription.objects.filter(burn_id__in=ids).order_by('burn_id'):
# Only update if the ePFP is in PREVIOUS_YEAR or if it has already been changed to TARGET_YEAR
# and no remaining records for PREVIOUS_YEAR exist
if (p.financial_year == PREVIOUS_YEAR or
(p.financial_year == TARGET_YEAR and
Prescription.objects.filter(burn_id=p.burn_id, financial_year=PREVIOUS_YEAR).count() == 0)):
if p.planning_status != 3: # only apply if corporate approved
print('Planning status for {} is not currently corporate approved.'.format(str(p.burn_id)))
else:
if p.status != 2: # burn status must not be closed
p.financial_year = TARGET_YEAR
p.planning_status_modified = DATE_APPROVED
p.save()
count += 1
else:
print('Burn status for {} is closed'.format(str(p.burn_id)))
else:
print('Financial year for {}({}) is not {} or already in {}.'
.format(str(p.burn_id), p.financial_year, PREVIOUS_YEAR, TARGET_YEAR))
print('Updated financial year and set planning status modified date '
'for {} carry over currently approved ePFPs'.format(count))
def update_seeking_approval(ids):
# Used to apply corporate approval to ePFPs (usually for list of ePFPs currently seeking approval)
# Also updates the date modified of the Planning Status (approval date)
print('\nApplying corporate approval and setting planning status'
' modified date for ePFPs currently seeking approval in {}.'.format(TARGET_YEAR))
count = 0
print('Total prescriptions in query: {}'.format(Prescription.objects.filter(burn_id__in=ids).count()))
for p in Prescription.objects.filter(burn_id__in=ids).order_by('burn_id'):
# Only update if the ePFP is in PREVIOUS_YEAR or if it has already been changed to TARGET_YEAR
# and no remaining records for PREVIOUS_YEAR exist
if (p.financial_year == PREVIOUS_YEAR or
(p.financial_year == TARGET_YEAR and
Prescription.objects.filter(burn_id=p.burn_id, financial_year=PREVIOUS_YEAR).count() == 0)):
if p.planning_status != 2: # only apply if seeking corporate approval
print('Planning status for {} is not currently seeking corporate approval.'.format(str(p.burn_id)))
else:
if p.status != 2: # burn status must not be closed
p.planning_status = 3 # corporate approved
p.planning_status_modified = DATE_APPROVED
p.save()
count += 1
else:
print('Burn status for {} is closed'.format(str(p.burn_id)))
else:
print('Financial year for {}({}) is not {} or already in {}.'
.format(str(p.burn_id), p.financial_year, PREVIOUS_YEAR, TARGET_YEAR))
print('Applied corporate approval and set planning status modified date '
'for {} ePFPs that were seeking approval'.format(count))
def update_financial_year(ids):
# Used to update the financial year of existing ePFPs from previous year
print('\nUpdating financial year only selected ePFPs from {}'.format(PREVIOUS_YEAR))
count = 0
already_in_target_year = 0
print('Total prescriptions in query: {}'.format(Prescription.objects.filter(burn_id__in=ids).count()))
for p in Prescription.objects.filter(burn_id__in=ids).order_by('burn_id'):
if p.financial_year != PREVIOUS_YEAR:
if p.financial_year == TARGET_YEAR:
already_in_target_year += 1
else:
print('Financial year for {}({}) is not {}.'.format(str(p.burn_id), p.financial_year, PREVIOUS_YEAR))
else:
p.financial_year = TARGET_YEAR
p.save()
count += 1
print('Updated financial year only for {} ePFPs'.format(count))
print('{} records already in {}'.format(already_in_target_year, TARGET_YEAR))
def update_seeking_approval_2(ids):
# Used to apply corporate approval to ePFPs (usually for list of ePFPs currently seeking approval)
# from previous year
# Also updates the date modified of the Planning Status (approval date)
# and updates the valid_to date of an existing approval to enable access to the ePFP
# as part of the Daily Burn Program 268 process
print('\nApplying corporate approval and setting planning status modified date '
'for ePFPs currently seeking approval in {}.'.format(TARGET_YEAR))
print('Also set valid_to date for approval for Daily Burn Program access')
count = 0
print('Total prescriptions in query: {}'.format(Prescription.objects.filter(burn_id__in=ids).count()))
for p in Prescription.objects.filter(burn_id__in=ids).order_by('burn_id'):
if p.financial_year != TARGET_YEAR:
print('Financial year for {}({}) is not {}.'.format(str(p.burn_id), p.financial_year, TARGET_YEAR))
else:
if p.planning_status != 2: # only apply if seeking corporate approval
print('Planning status for {} is not currently seeking corporate approval.'.format(str(p.burn_id)))
else:
if p.status != 2: # burn status must not be closed
p.planning_status = 3 # corporate approved
p.financial_year = TARGET_YEAR # season
p.planning_status_modified = DATE_APPROVED # approval date
a = p.approval_set.all()
if len(a) > 0:
a[0].valid_to = DATE_APPROVED_TO # approved until date
a[0].save()
p.save()
count += 1
else:
print('Burn status for {} is closed'.format(str(p.burn_id)))
print('Applied corporate approval and set planning status modified date '
'for {} ePFPs that were seeking approval'.format(count))
def update_burn_priority(id_priority_pairs):
# Used to update the priority of the listed ePFPs
print('\nUpdating priority for selected ePFPs in {}'.format(TARGET_YEAR))
count = 0
data_count = 0
for burn_id, priority in id_priority_pairs:
for p in Prescription.objects.filter(burn_id=burn_id).order_by('burn_id'):
if p.financial_year != TARGET_YEAR:
print('Financial year for {}({}) is not {}.'.format(str(p.burn_id), p.financial_year, TARGET_YEAR))
else:
p.priority = int(priority)
p.save()
count += 1
data_count += 1
    print('Updated priority for {} ePFPs (expected {})'.format(count, data_count))
def update_burn_areas(id_burn_area_pairs):
# Used to update the area of the listed ePFPs
print('\nUpdating area for selected ePFPs in {}'.format(TARGET_YEAR))
count = 0
data_count = 0
for burn_id, area in id_burn_area_pairs:
for p in Prescription.objects.filter(burn_id=burn_id).order_by('burn_id'):
if p.financial_year != TARGET_YEAR:
print('Financial year for {}({}) is not {}.'.format(str(p.burn_id), p.financial_year, TARGET_YEAR))
else:
p.area = Decimal(area)
p.save()
count += 1
data_count += 1
    print('Updated area for {} ePFPs (expected {})'.format(count, data_count))
def update_burn_perimeters(id_perimeter_pairs):
# Used to update the perimeters of the listed ePFPs
print('\nUpdating perimeters for selected ePFPs in {}'.format(TARGET_YEAR))
count = 0
data_count = 0
for burn_id, perimeter in id_perimeter_pairs:
for p in Prescription.objects.filter(burn_id=burn_id).order_by('burn_id'):
if p.financial_year != TARGET_YEAR:
print('Financial year for {}({}) is not {}.'.format(str(p.burn_id), p.financial_year, TARGET_YEAR))
else:
p.perimeter = Decimal(perimeter)
p.save()
count += 1
data_count += 1
    print('Updated perimeter for {} ePFPs (expected {})'.format(count, data_count))
def update_overall_rationales(id_overall_rationale_pairs):
# Used to update the overall rationale of the listed ePFPs
print('\nUpdating overall rationale for selected ePFPs in {}'.format(TARGET_YEAR))
count = 0
data_count = 0
for burn_id, rationale in id_overall_rationale_pairs:
for p in Prescription.objects.filter(burn_id=burn_id).order_by('burn_id'):
if p.financial_year != TARGET_YEAR:
print('Financial year for {}({}) is not {}.'.format(str(p.burn_id), p.financial_year, TARGET_YEAR))
else:
p.rationale = rationale
p.save()
count += 1
data_count += 1
    print('Updated rationale for {} ePFPs (expected {})'.format(count, data_count))
def print_burnstate_reviewed(ids):
count = 0
for p in Prescription.objects.filter(burn_id__in=ids, financial_year=TARGET_YEAR).order_by('burn_id'):
if p.burnstate.all(): # and not reviewable(p):
            print('{} ; {} ; {}'.format(p.burn_id, [i.record for i in p.burnstate.all()], reviewable(p)))
def delete_burnstate_unreviewed(ids):
count = 0
for p in Prescription.objects.filter(burn_id__in=ids, financial_year=TARGET_YEAR).order_by('burn_id'):
if p.burnstate.all() and not reviewable(p):
b = p.burnstate.all()
b.delete()
def reviewable(prescription):
p = Prescription.objects.filter(
id=prescription.id,
approval_status=Prescription.APPROVAL_APPROVED,
status=Prescription.STATUS_OPEN,
ignition_status__in=[Prescription.IGNITION_NOT_STARTED, Prescription.IGNITION_COMMENCED],
financial_year=TARGET_YEAR
).order_by('burn_id')
return True if p else False
if __name__ == "__main__":
try:
SCRIPT_FOLDER = 'pbs/scripts'
DATE_APPROVED_INPUT = raw_input("Please enter the date that the Burn Program was approved "
"by Corporate Executive (dd/mm/yyyy): ")
DATE_APPROVED = datetime.strptime(DATE_APPROVED_INPUT, '%d/%m/%Y').replace(tzinfo=pytz.UTC)
if DATE_APPROVED.month != 7 or DATE_APPROVED.year != date.today().year:
print('Can only run this script in July of the current year')
sys.exit()
DATE_APPROVED_TO = date(DATE_APPROVED.year, 9, 30)
PREVIOUS_YEAR = '{}/{}'.format(DATE_APPROVED.year-1, DATE_APPROVED.year)
TARGET_YEAR = '{}/{}'.format(DATE_APPROVED.year, DATE_APPROVED.year+1)
SCRIPT_DATA_FOLDER = '{}/eofy_data/{}'.format(SCRIPT_FOLDER, TARGET_YEAR.split('/')[0])
except BaseException:
print('Error')
sys.exit()
print('\nScript will run with the following details:')
print(' - Previous Year: {}'.format(PREVIOUS_YEAR))
print(' - Target Year: {}'.format(TARGET_YEAR))
print(' - Script Data Folder: {}/'.format(SCRIPT_DATA_FOLDER))
CONTINUE_INPUT = raw_input("Do you wish to continue [y/n]? ")
if CONTINUE_INPUT == 'y':
try:
with transaction.atomic():
corp_approved_carryover_ids = read_ids('{}/corp_approved_carryover.txt'.format(SCRIPT_DATA_FOLDER))
carryover_currently_approved(corp_approved_carryover_ids)
seeking_approval_ids = read_ids('{}/approve_seeking_approval.txt'.format(SCRIPT_DATA_FOLDER))
update_seeking_approval(seeking_approval_ids)
update_financial_year_ids = read_ids('{}/financial_year_only.txt'.format(SCRIPT_DATA_FOLDER))
update_financial_year(update_financial_year_ids)
burn_priority_tuples = read_id_tuples('{}/burn_priority.txt'.format(SCRIPT_DATA_FOLDER))
update_burn_priority(burn_priority_tuples)
burn_area_tuples = read_id_tuples('{}/burn_areas.txt'.format(SCRIPT_DATA_FOLDER))
update_burn_areas(burn_area_tuples)
burn_perimeter_tuples = read_id_tuples('{}/burn_perimeters.txt'.format(SCRIPT_DATA_FOLDER))
update_burn_perimeters(burn_perimeter_tuples)
overall_rationale_tuples = read_id_tuples_pipe_separated('{}/overall_rationales.txt'
.format(SCRIPT_DATA_FOLDER))
update_overall_rationales(overall_rationale_tuples)
except BaseException:
print('Error')
else:
sys.exit()
| 47.089136 | 125 | 0.644898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,924 | 0.409583 |
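# A minimal, standalone sketch of how the approval date entered at the prompt drives
# PREVIOUS_YEAR, TARGET_YEAR and DATE_APPROVED_TO in the script above; the 03/07/2019
# date is only an example, not a value taken from the data.
from datetime import datetime, date
import pytz
approved = datetime.strptime('03/07/2019', '%d/%m/%Y').replace(tzinfo=pytz.UTC)
previous_year = '{}/{}'.format(approved.year - 1, approved.year)  # '2018/2019'
target_year = '{}/{}'.format(approved.year, approved.year + 1)    # '2019/2020'
approved_to = date(approved.year, 9, 30)                          # approvals valid to 30 September
print(previous_year, target_year, approved_to)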
724b92184d8f2e9819e55008805cce856be796bd | 4,012 | py | Python | learnware/algorithm/anomaly_detect/iforest.py | marvinren/aiops_gaussian_learnware | 47683546d6648a38bb71988c33f959cf7308376f | [
"Apache-2.0"
]
| null | null | null | learnware/algorithm/anomaly_detect/iforest.py | marvinren/aiops_gaussian_learnware | 47683546d6648a38bb71988c33f959cf7308376f | [
"Apache-2.0"
]
| null | null | null | learnware/algorithm/anomaly_detect/iforest.py | marvinren/aiops_gaussian_learnware | 47683546d6648a38bb71988c33f959cf7308376f | [
"Apache-2.0"
]
| null | null | null | import numpy as np
from scipy.stats import binom
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from scipy.special import erf
from learnware.algorithm.anomaly_detect.base import BaseAnomalyDetect
class iForest(BaseAnomalyDetect):
def __init__(self, n_estimators=100,
max_samples="auto",
contamination=0.1,
max_features=1.,
bootstrap=False,
n_jobs=1,
behaviour='old',
random_state=None,
verbose=0):
super(iForest, self).__init__()
self.contamination = contamination
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.n_jobs = n_jobs
self.behaviour = behaviour
self.random_state = random_state
self.verbose = verbose
        # The underlying detector used by the algorithm (a scikit-learn IsolationForest)
self.detector_ = None
self.decision_scores_ = None
self.threshold_ = None
self.labels_ = None
def fit(self, X, y=None):
self.detector_ = IsolationForest(n_estimators=self.n_estimators,
max_samples=self.max_samples,
contamination=self.contamination,
max_features=self.max_features,
bootstrap=self.bootstrap,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
X = self._data_type_transform(X)
self.detector_.fit(X, y=None, sample_weight=None)
self.decision_function(X)
self._decision_threshold_process()
return self
def predict(self, X, return_confidence=False):
X = self._data_type_transform(X)
if self.detector_ is None:
raise EOFError("detector not found, please fit the train data.")
pred_score = self.decision_function(X)
prediction = np.ones_like(pred_score, dtype=int)
prediction[pred_score < self.threshold_] = -1
if return_confidence:
confidence = self.predict_confidence(X)
return prediction, confidence
return prediction
def decision_function(self, X):
if self.detector_ is None:
raise EOFError("detector not found, please fit the train data.")
self.decision_scores_ = self.detector_.decision_function(X)
return self.decision_scores_
def _decision_threshold_process(self):
self.threshold_ = np.percentile(self.decision_scores_,
100 * self.contamination)
self.labels_ = (self.decision_scores_ > self.threshold_).astype(
'int').ravel()
self._mu = np.mean(self.decision_scores_)
self._sigma = np.std(self.decision_scores_)
return self
def predict_confidence(self, X):
n = len(self.decision_scores_)
test_scores = self.decision_function(X)
count_instances = np.vectorize(
lambda x: np.count_nonzero(self.decision_scores_ <= x))
n_instances = count_instances(test_scores)
# Derive the outlier probability using Bayesian approach
posterior_prob = np.vectorize(lambda x: (1 + x) / (2 + n))(n_instances)
# Transform the outlier probability into a confidence value
confidence = np.vectorize(
            lambda p: 1 - binom.cdf(n - int(n * self.contamination), n, p))(
posterior_prob)
prediction = (test_scores > self.threshold_).astype('int').ravel()
np.place(confidence, prediction == 0, 1 - confidence[prediction == 0])
return confidence
def _data_type_transform(self, X):
if type(X) is list:
return np.array(X).reshape(-1, 1)
return X
| 37.148148 | 79 | 0.598704 | 3,783 | 0.939176 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.064052 |
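# A minimal usage sketch for the iForest wrapper above, assuming numpy, scipy and
# scikit-learn are installed; the random data is illustrative only.
import numpy as np
X_train = np.random.randn(200, 2)
detector = iForest(n_estimators=50, contamination=0.1).fit(X_train)
labels, confidence = detector.predict(np.random.randn(10, 2), return_confidence=True)
print(labels)      # 1 for inliers, -1 for predicted anomalies
print(confidence)  # per-sample confidence derived from the training score distribution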
724cda3b3a14ff18ab5608878c35ee486f9afa69 | 217 | py | Python | noxfile.py | rshnn/Practical-RL | f7688e224a342c7f67478f2c4cd6bb7b1a122205 | [
"MIT"
]
| 3 | 2022-02-14T17:59:56.000Z | 2022-02-15T10:08:43.000Z | noxfile.py | rshnn/Practical-RL | f7688e224a342c7f67478f2c4cd6bb7b1a122205 | [
"MIT"
]
| 21 | 2021-11-02T21:35:26.000Z | 2022-01-17T18:50:42.000Z | noxfile.py | rshnn/Practical-RL | f7688e224a342c7f67478f2c4cd6bb7b1a122205 | [
"MIT"
]
| 2 | 2021-11-24T15:25:17.000Z | 2022-02-14T19:04:56.000Z | from nox import session
@session(reuse_venv=True)
def docs(session):
session.install("jupyter-book", "sphinx-sitemap", "jupyterbook-latex")
# we need _config.yml _toc.yml
session.run("jb", "build", ".")
| 24.111111 | 74 | 0.686636 | 0 | 0 | 0 | 0 | 190 | 0.875576 | 0 | 0 | 93 | 0.428571 |
724da380c925fd0be608bd11f30b6d426eb5746d | 27 | py | Python | megnet/data/__init__.py | abdalazizrashid/megnet | 8ad0fca246465bd57d66392f790c5310c610dfff | [
"BSD-3-Clause"
]
| 367 | 2018-12-13T14:49:00.000Z | 2022-03-31T10:17:04.000Z | megnet/data/__init__.py | kdmsit/MEGNet | 4f3c76c6b99edcb41d52ae5e8ae9dc89956d33d1 | [
"MIT"
]
| 162 | 2019-02-08T20:38:12.000Z | 2022-03-31T21:13:06.000Z | megnet/data/__init__.py | kdmsit/MEGNet | 4f3c76c6b99edcb41d52ae5e8ae9dc89956d33d1 | [
"MIT"
]
| 119 | 2018-12-17T10:16:12.000Z | 2022-03-31T17:26:57.000Z | """
Data manipulations
"""
| 6.75 | 18 | 0.62963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.962963 |
7252008c26b1662083a1400694c806c34e33ed67 | 910 | py | Python | graviteeio_cli/lint/functions/length.py | gravitee-io/gravitee-cli | 8e3bf9f2c0c2873e0f6e67f8fcaf0d3b6c44b3ca | [
"Apache-2.0"
]
| 12 | 2019-05-29T20:06:01.000Z | 2020-10-07T07:40:27.000Z | graviteeio_cli/lint/functions/length.py | gravitee-io/graviteeio-cli | 0e0069b00ce40813efc7d40142a6dc4b4ec7a261 | [
"Apache-2.0"
]
| 41 | 2019-11-04T18:18:18.000Z | 2021-04-22T16:12:51.000Z | graviteeio_cli/lint/functions/length.py | gravitee-io/gravitee-cli | 8e3bf9f2c0c2873e0f6e67f8fcaf0d3b6c44b3ca | [
"Apache-2.0"
]
| 6 | 2019-06-18T04:27:49.000Z | 2021-06-02T17:52:24.000Z | from graviteeio_cli.lint.types.function_result import FunctionResult
def length(value, **kwargs):
"""Count the length of a string an or array, the number of properties in an object, or a numeric value, and define minimum and/or maximum values."""
min = None
max = None
if "min" in kwargs and type(kwargs["min"]) is int:
min = kwargs["min"]
if "max" in kwargs and type(kwargs["max"]) is int:
max = kwargs["max"]
value_length = 0
if value:
        if isinstance(value, (int, float)):
value_length = value
else:
value_length = len(value)
results = []
    if min is not None and value_length < min:
results.append(
FunctionResult("min length is {}".format(min))
)
    if max is not None and value_length > max:
results.append(
FunctionResult("max length is {}".format(max))
)
return results
| 26 | 152 | 0.597802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.235165 |
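# A small usage sketch for the length() rule above, assuming the module and its
# FunctionResult dependency are importable as shown in the file.
too_short = length("abc", min=5, max=10)          # one FunctionResult: "min length is 5"
within_bounds = length([1, 2, 3], min=1, max=5)   # empty list, nothing to report
print(len(too_short), len(within_bounds))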
a0c60f619b683347cb7cc9f4f6e9936af96f0dbd | 27,874 | py | Python | smartrecruiters_python_client/apis/analytics_api.py | roksela/smartrecruiters-python-client | 6d0849d173a3d6718b5f0769098f4c76857f637d | [
"MIT"
]
| 5 | 2018-03-27T08:20:13.000Z | 2022-03-30T06:23:38.000Z | smartrecruiters_python_client/apis/analytics_api.py | roksela/smartrecruiters-python-client | 6d0849d173a3d6718b5f0769098f4c76857f637d | [
"MIT"
]
| null | null | null | smartrecruiters_python_client/apis/analytics_api.py | roksela/smartrecruiters-python-client | 6d0849d173a3d6718b5f0769098f4c76857f637d | [
"MIT"
]
| 2 | 2018-12-05T04:48:37.000Z | 2020-12-17T12:12:12.000Z | # coding: utf-8
"""
Unofficial python library for the SmartRecruiters API
The SmartRecruiters API provides a platform to integrate services or applications, build apps and create fully customizable career sites. It exposes SmartRecruiters functionality and allows to connect and build software enhancing it.
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AnalyticsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def analytics_applications(self, **kwargs):
"""
Get the list of applications.
        Get the list of applications. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Applications Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/applications-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_applications(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: ApplicationsReport
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.analytics_applications_with_http_info(**kwargs)
else:
(data) = self.analytics_applications_with_http_info(**kwargs)
return data
def analytics_applications_with_http_info(self, **kwargs):
"""
Get the list of applications.
        Get the list of applications. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Applications Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/applications-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_applications_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: ApplicationsReport
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['date_format']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method analytics_applications" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/analytics/applications'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'date_format' in params:
query_params['dateFormat'] = params['date_format']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8', 'text/csv'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApplicationsReport',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def analytics_hiring_team(self, **kwargs):
"""
Get the list of hiring team members.
        Get the list of hiring team members. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Hiring Team Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/hiring-team-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_hiring_team(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: HiringTeamReport
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.analytics_hiring_team_with_http_info(**kwargs)
else:
(data) = self.analytics_hiring_team_with_http_info(**kwargs)
return data
def analytics_hiring_team_with_http_info(self, **kwargs):
"""
Get the list of hiring team members.
        Get the list of hiring team members. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Hiring Team Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/hiring-team-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_hiring_team_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: HiringTeamReport
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method analytics_hiring_team" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/analytics/hiring-team'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8', 'text/csv'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HiringTeamReport',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def analytics_interviews(self, **kwargs):
"""
Get the list of interviews.
        Get the list of interviews. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Interviews Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/interviews-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_interviews(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: InterviewsReport
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.analytics_interviews_with_http_info(**kwargs)
else:
(data) = self.analytics_interviews_with_http_info(**kwargs)
return data
def analytics_interviews_with_http_info(self, **kwargs):
"""
Get the list of interviews.
        Get the list of interviews. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Interviews Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/interviews-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_interviews_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: InterviewsReport
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['date_format']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method analytics_interviews" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/analytics/interviews'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'date_format' in params:
query_params['dateFormat'] = params['date_format']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8', 'text/csv'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InterviewsReport',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def analytics_job_fields(self, **kwargs):
"""
Get the list of job fields.
        Get the list of job fields. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Job Fields Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/job-fields-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_job_fields(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: JobFieldsReport
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.analytics_job_fields_with_http_info(**kwargs)
else:
(data) = self.analytics_job_fields_with_http_info(**kwargs)
return data
def analytics_job_fields_with_http_info(self, **kwargs):
"""
Get the list of job fields.
        Get the list of job fields. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Job Fields Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/job-fields-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_job_fields_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: JobFieldsReport
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method analytics_job_fields" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/analytics/job-fields'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8', 'text/csv'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='JobFieldsReport',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def analytics_jobs(self, **kwargs):
"""
Get the list of jobs.
        Get the list of jobs. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Jobs Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/jobs-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_jobs(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: JobsReport
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.analytics_jobs_with_http_info(**kwargs)
else:
(data) = self.analytics_jobs_with_http_info(**kwargs)
return data
def analytics_jobs_with_http_info(self, **kwargs):
"""
Get the list of jobs.
        Get the list of jobs. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Jobs Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/jobs-data-service/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_jobs_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: JobsReport
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['date_format']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method analytics_jobs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/analytics/jobs'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'date_format' in params:
query_params['dateFormat'] = params['date_format']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8', 'text/csv'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='JobsReport',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def analytics_positions(self, **kwargs):
"""
Get the list of job positions.
        Get the list of job positions. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Positions Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/positions/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_positions(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: PositionsReport
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.analytics_positions_with_http_info(**kwargs)
else:
(data) = self.analytics_positions_with_http_info(**kwargs)
return data
def analytics_positions_with_http_info(self, **kwargs):
"""
Get the list of job positions.
        Get the list of job positions. For a more comprehensive description see [Analytics API](https://dev.smartrecruiters.com/customer-api/analytics-api/) and [Positions Data Service](https://dev.smartrecruiters.com/customer-api/analytics-api/positions/).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.analytics_positions_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str date_format: Defines response date format
:return: PositionsReport
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['date_format']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method analytics_positions" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/analytics/positions'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'date_format' in params:
query_params['dateFormat'] = params['date_format']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8', 'text/csv'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PositionsReport',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 44.10443 | 280 | 0.583052 | 27,222 | 0.976609 | 0 | 0 | 0 | 0 | 0 | 0 | 15,054 | 0.540073 |
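# A minimal usage sketch for the generated AnalyticsApi client above. It assumes the
# package exposes ApiClient at smartrecruiters_python_client.api_client (mirroring the
# relative import in the module) and that authentication for the 'key' setting has
# already been configured; the dateFormat value is illustrative.
from smartrecruiters_python_client.api_client import ApiClient
from smartrecruiters_python_client.apis.analytics_api import AnalyticsApi
api = AnalyticsApi(ApiClient())  # omitting the client falls back to the default Configuration
applications_report = api.analytics_applications(date_format='yyyy-MM-dd')
hiring_team_report = api.analytics_hiring_team()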
a0c68d4449b586355649b08e113c775fd8d862f6 | 398 | py | Python | Timofei-Khirianov-2019/lesson_001/003_anketa.py | anklav24/Python-Education | 49ebcfabda1376390ee71e1fe321a51e33831f9e | [
"Apache-2.0"
]
| null | null | null | Timofei-Khirianov-2019/lesson_001/003_anketa.py | anklav24/Python-Education | 49ebcfabda1376390ee71e1fe321a51e33831f9e | [
"Apache-2.0"
]
| null | null | null | Timofei-Khirianov-2019/lesson_001/003_anketa.py | anklav24/Python-Education | 49ebcfabda1376390ee71e1fe321a51e33831f9e | [
"Apache-2.0"
]
| null | null | null | name = input('Hello! What is your name? : ')
print('Nice to meet you,', name + '!')
print()
age = int(input('How old are you ' + name + '? : '))
print()
x = age + 1
# Print the guess "А я думал тебе <x> ..." ("And I thought you were <x> ...") and pick
# the correct Russian plural form of "year" (лет / год / года) from the last digit of x.
print('А я думал тебе', x, end=' ')
if x >= 11 and x <= 19:
    print('лет', end='')
elif x % 10 == 1:
    print('год', end='')
elif x % 10 >= 2 and x % 10 <= 4:
    print('года', end='')
else:
    print('лет', end='')
print('!')
| 19.9 | 52 | 0.502513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.35782 |
a0c69fd6e11617fc5f9eb586f7c2029856d0877b | 2,399 | py | Python | Technical_Indicators/rainbow_charts.py | vhn0912/Finance | 39cf49d4d778d322537531cee4ce3981cc9951f9 | [
"MIT"
]
| 441 | 2020-04-22T02:21:19.000Z | 2022-03-29T15:00:24.000Z | Technical_Indicators/rainbow_charts.py | happydasch/Finance | 4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35 | [
"MIT"
]
| 5 | 2020-07-06T15:19:58.000Z | 2021-07-23T18:32:29.000Z | Technical_Indicators/rainbow_charts.py | happydasch/Finance | 4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35 | [
"MIT"
]
| 111 | 2020-04-21T11:40:39.000Z | 2022-03-20T07:26:17.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
# input
symbol = 'AAPL'
start = dt.date.today() - dt.timedelta(days = 365*2)
end = dt.date.today()
# Read data
df = yf.download(symbol,start,end)
# R=red, O=orange, Y=yellow, G=green, B=blue, I = indigo, and V=violet
df['Red'] = df['Adj Close'].rolling(2).mean()
df['Orange'] = df['Red'].rolling(2).mean()
df['Yellow'] = df['Orange'].rolling(2).mean()
df['Green'] = df['Yellow'].rolling(2).mean()
df['Blue'] = df['Green'].rolling(2).mean()
df['Indigo'] = df['Blue'].rolling(2).mean()
df['Violet'] = df['Indigo'].rolling(2).mean()
df = df.dropna()
colors = ['k','r', 'orange', 'yellow', 'g', 'b', 'indigo', 'violet']
df[['Adj Close','Red','Orange','Yellow','Green','Blue','Indigo','Violet']].plot(color=colors, figsize=(18,12))
plt.fill_between(df.index, df['Low'], df['High'], color='grey', alpha=0.4)
plt.plot(df['Low'], c='darkred', linestyle='--', drawstyle="steps")
plt.plot(df['High'], c='forestgreen', linestyle='--', drawstyle="steps")
plt.title('Rainbow Charts')
plt.legend(loc='best')
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
# ## Candlestick with Rainbow
from matplotlib import dates as mdates
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = mdates.date2num(dfc['Date'].tolist())
from mplfinance.original_flavor import candlestick_ohlc
fig, ax1 = plt.subplots(figsize=(20,12))
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
#colors = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet']
#labels = ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']
for i in dfc[['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']]:
ax1.plot(dfc['Date'], dfc[i], color=i, label=i)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.set_xlabel('Date')
ax1.legend(loc='best')
plt.show() | 36.348485 | 111 | 0.667361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 794 | 0.330971 |
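# A compact, equivalent way to build the seven rainbow bands computed near the top of
# the script: each band is a 2-period SMA of the previous one. Illustrative only; it
# assumes a DataFrame `df` with an 'Adj Close' column, as above.
bands = ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']
previous = 'Adj Close'
for band in bands:
    df[band] = df[previous].rolling(2).mean()
    previous = band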
a0c8d55fb37c691da19d42d22717e7769ad0fbbf | 1,670 | py | Python | UpWork_Projects/pdf_downloader.py | SurendraTamang/Web-Scrapping | 2bb60cce9010b4b68f5c11bf295940832bb5df50 | [
"MIT"
]
| null | null | null | UpWork_Projects/pdf_downloader.py | SurendraTamang/Web-Scrapping | 2bb60cce9010b4b68f5c11bf295940832bb5df50 | [
"MIT"
]
| null | null | null | UpWork_Projects/pdf_downloader.py | SurendraTamang/Web-Scrapping | 2bb60cce9010b4b68f5c11bf295940832bb5df50 | [
"MIT"
]
| 1 | 2022-01-18T17:15:51.000Z | 2022-01-18T17:15:51.000Z | import requests
from urllib.request import urlopen
from urllib.request import urlretrieve
import cgi
import os.path
def retrive_file_name(url):
#url = 'https://material.ibear.pt/BTHorarios2019/FileGet.aspx?FileId=5601'
remotefile = urlopen(url)
blah = remotefile.info()['Content-Disposition']
_, params = cgi.parse_header(blah)
filename = params["filename"]
#urlretrieve(url, filename)
return filename
def pdf_downloader():
for i in range (0,10000):
        # zero-pad the file id to four digits, e.g. 7 -> '0007'
        cntr = str(i).zfill(4)
try:
url = f"https://material.ibear.pt/BTHorarios2019/FileGet.aspx?FileId={cntr}"
response = requests.get(url)
if response.status_code == 200:
file_name = retrive_file_name(url)
file_path1 = f'D:/upworkWorkspace/25032020_pdf_downloader/downloads/{file_name}'
file_path2 = f'D:/upworkWorkspace/25032020_pdf_downloader/downloads/copy_{cntr}_{file_name}'
if not os.path.isfile(file_path1) and not os.path.isfile(file_path2):
print(file_name)
with open(file_path1, 'wb') as f:
f.write(response.content)
else:
print(f'copy_{cntr}_{file_name}')
with open(file_path2, 'wb') as f:
f.write(response.content)
else:
print("Counter: ", cntr)
        except Exception:
            # skip ids that fail to download or that lack a Content-Disposition header
            pass
pdf_downloader() | 33.4 | 108 | 0.552096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.238323 |
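# A sketch of the same filename extraction reusing the requests response that was already
# fetched, which avoids the extra urlopen() round trip made by retrive_file_name(); it
# assumes the server sends a Content-Disposition header, as the original helper does.
import cgi
def file_name_from_response(response):
    header = response.headers.get('Content-Disposition', '')
    _, params = cgi.parse_header(header)
    return params.get('filename')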
a0cab7a3ae269edaac7fa1a7d902a54bd96a752d | 13,282 | py | Python | backend/app/vta/texdf/tex_df.py | megagonlabs/leam | f19830d4d6935bece7d163abbc533cfb4bc2e729 | [
"Apache-2.0"
]
| 7 | 2020-09-14T07:03:51.000Z | 2022-01-13T10:11:53.000Z | backend/app/vta/texdf/tex_df.py | megagonlabs/leam | f19830d4d6935bece7d163abbc533cfb4bc2e729 | [
"Apache-2.0"
]
| null | null | null | backend/app/vta/texdf/tex_df.py | megagonlabs/leam | f19830d4d6935bece7d163abbc533cfb4bc2e729 | [
"Apache-2.0"
]
| 1 | 2020-09-07T22:26:27.000Z | 2020-09-07T22:26:27.000Z | import spacy
import json, os
import dill as pickle
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sqlalchemy import create_engine, select, MetaData, Table, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from typing import List, Dict, Any
from flask import current_app
from app.models import Dataset
# from vta.operators import featurize
# from vta.operators import clean
# from vta.operators import select
# from vta import spacy_nlp
from .tex_column import TexColumn
from .tex_metadata import MetadataItem
from .tex_vis import TexVis
from ..types import VTAColumnType, VisType
class TexDF:
dataset_name: str
data_view: pd.DataFrame
table_view: []
table_links: []
columns: Dict[str, TexColumn]
visualizations: List[TexVis]
coordination_indexes: Dict[str, Dict]
udf: Dict[str, Any] # TODO: specify typing of function expected
def __init__(self, df, name):
self.dataset_name = name
self.data_view = df
self.table_view = []
self.table_links = []
self.columns = {i: TexColumn(i, VTAColumnType.TEXT) for i in df.columns}
self.visualizations = []
self.coordination_indexes = {}
self.udf = {}
# self.cached_visual_encodings = {i: {} for i in self.df.columns}
# self.view_indexes = {}
self.update_table_view()
if os.path.exists("/app/UI_QUEUE.pkl"):
self.UI_QUEUE = pickle.load(open("UI_QUEUE.pkl", "rb"))
else:
self.UI_QUEUE = []
pickle.dump(self.UI_QUEUE, open("/app/UI_QUEUE.pkl", "wb"))
def get_dataview_column(self, col_name: str) -> pd.Series:
return self.data_view[col_name]
def get_column_type(self, col_name: str) -> VTAColumnType:
return self.columns[col_name].col_type
def get_column_types(self, col_names: List[str]) -> List[VTAColumnType]:
return [self.columns[col].col_type for col in col_names]
def get_all_column_types(self) -> List[str]:
return [self.columns[col].col_type.value for col in self.columns.keys()]
def get_table_view(self):
return self.table_view
def get_table_view_columns(self):
return [i for i in self.data_view.columns]
def get_vis(self, i):
return self.visualizations[i]
def get_visualizations(self):
vis_list = [i.to_dict() for i in self.visualizations]
return vis_list
def get_column_metadata(self, col_name: str) -> TexColumn:
return self.columns[col_name]
def get_all_metadata(self):
# metadata will be a table with 3 columns: tag | data_type | data
all_metadata = []
for _, col in self.columns.items():
for _, md in col.metadata.items():
all_metadata.append(
{"tag": md.tag, "type": md.md_type.value, "value": md.value}
)
return all_metadata
def print_metadata(self):
# metadata will be a table with 3 columns: tag | data_type | data
pretty_print = ""
for _, col in self.columns.items():
for _, md in col.metadata.items():
col_metadata_item = {}
col_metadata_item["column"] = col.col_name
col_metadata_item["tag_name"] = md.tag
col_metadata_item["metadata_type"] = md.md_type.value
col_metadata_item["value"] = str(md.value)[:80] + "..."
pretty_print += str(col_metadata_item) + "\n\n"
print(pretty_print)
def get_vis_lookup_table(self, vis_idx):
return self.visualizations[vis_idx].row_lookup_table
def get_coordination_idx(self, metadata_name):
return self.coordination_indexes[metadata_name]
def get_vis_links(self, vis_idx):
if vis_idx == "table":
return self.table_links
return self.visualizations[vis_idx].links
def get_columns_vega_format(self, columns, data_type, md_tag=None):
# Take in list of columns, output data from those columns formatted
# in vega-lite format: [{"id": 1, "x": 0.3}, {"id": 2, "x": 0.7}, ...]
vega_rows = []
if data_type == "dataview":
for _, row in self.data_view[columns].iterrows():
vega_row = {c: row[c] for c in columns}
vega_rows.append(vega_row)
elif data_type == "metadata":
col_name = columns[0]
data = self.get_column_metadata(col_name).get_metadata_by_tag(md_tag)
# add some way to handle different types of metadata
if md_tag == "top_scores":
tw_list = [(k, v) for k, v in data.value.items()]
tw_list = sorted(tw_list, key=lambda word: word[1], reverse=True)
tw_list = [(v[0], v[1], i + 1) for i, v in enumerate(tw_list)]
# log.info("top words list:")
# log.info(tw_list)
for v in tw_list:
vega_rows.append({"topword": v[0], "score": v[1], "order": v[2]})
else:
print("data is: ")
print(data.value)
assert isinstance(data.value, dict)
for label, count in data.value.items():
vega_rows.append({"label": label, "count": count})
return vega_rows
def get_udf(self, func_name):
return self.udf[func_name]
# TODO: specify a certain function params/return values
def add_udf(self, func):
self.udf[func.__name__] = func
self.checkpoint_texdf()
def print_udfs(self):
print(self.udf)
def rename_column(self, old_col, new_col):
self.data_view = self.data_view.rename(columns={old_col: new_col})
self.columns[new_col] = self.columns[old_col]
del self.columns[old_col]
self.update_table_view()
task = {"view": "table", "type": "update_column"}
self.add_to_uiq(task)
self.checkpoint_texdf()
# TODO: add regex to this
def replace_column_value(self, col_name, old_value, new_value):
# data_view["category"].replace("ham", 0, inplace=True)
self.data_view[col_name].replace(old_value, new_value, inplace=True)
self.update_table_view()
task = {"view": "table", "type": "update_column"}
self.add_to_uiq(task)
self.checkpoint_texdf()
def select_vis_element(self, vis_idx, item_idx):
# TODO: add support for words in select like in topwords tf-idf barchart
# TODO: add support for linking, where we might generate many new select ui tasks
if vis_idx == "table":
task = {"view": "table", "type": "select", "rows": item_idx}
else:
task = {
"view": "datavis",
"type": "select",
"vis_idx": vis_idx,
"rows": item_idx,
}
self.add_to_uiq(task)
self.checkpoint_texdf()
def add_coord_idx(self, metadata, coord_idx):
self.coordination_indexes[metadata] = coord_idx
self.checkpoint_texdf()
def remove_vis(self, vis_idx):
if vis_idx < 0 or vis_idx >= len(self.visualizations):
return
# remove vis in place and save
del self.visualizations[vis_idx]
for v in self.visualizations:
if vis_idx in v.links:
v.links.remove(vis_idx)
# update the rest of the links to correspond to their new positions
for v in self.visualizations:
for link_idx, link in enumerate(v.links):
if vis_idx > link:
pass
elif vis_idx < link:
v.links[link_idx] = link - 1
else:
raise Exception(
"there should be no link with vis idx %d it was deleted",
vis_idx,
)
task = {
"view": "table",
"type": "update_vis",
} # change this to be related to vis
self.add_to_uiq(task)
self.checkpoint_texdf()
def remove_link(self, src, target):
if src == "table":
vis_obj = self.table_links
else:
vis_obj = self.visualizations[src].links
if target in vis_obj:
vis_obj.remove(target)
self.checkpoint_texdf()
def add_uni_link(self, src, target):
if src == "table":
vis_obj = self.table_links
else:
vis_obj = self.visualizations[src].links
if target not in vis_obj:
vis_obj.append(target)
self.checkpoint_texdf()
def add_bi_link(self, src, target):
if src == "table":
vis_obj_src = self.table_links
else:
vis_obj_src = self.visualizations[src].links
if target == "table":
vis_obj_target = self.table_links
else:
vis_obj_target = self.visualizations[target].links
if target not in vis_obj_src:
vis_obj_src.append(target)
if src not in vis_obj_target:
vis_obj_target.append(src)
self.checkpoint_texdf()
def add_visualization(self, columns, vis_type, selection=None, md_tag=None):
# if aggregate type vis, using metadata, if not using column(s)
if vis_type == VisType.tw_barchart or vis_type == VisType.barchart:
data_type = "metadata"
vis_data = self.get_columns_vega_format(columns, data_type, md_tag=md_tag)
else:
data_type = "dataview"
vis_data = self.get_columns_vega_format(columns, data_type)
col_types = self.get_column_types(columns)
new_vis = TexVis(
vis_type,
columns,
col_types,
vis_data,
selection_type=selection,
md_tag=md_tag,
)
self.visualizations.append(new_vis)
vis_index = len(self.visualizations) - 1
task = {
"view": "datavis",
"type": "add_vis",
"idx": vis_index,
"vis_type": new_vis.vis_type.value,
"selection_type": new_vis.selection_type,
}
self.add_to_uiq(task)
self.checkpoint_texdf()
def update_table_view(self):
readable_df = self.data_view.copy()
for k, v in self.columns.items():
col_type = v.col_type
if col_type == VTAColumnType.VECTOR:
is_column_list = type(readable_df[k][0]) == list
row_vectors = (
readable_df[k].map(lambda r: np.array(r).tolist())
if is_column_list
else readable_df[k].map(lambda r: r.toarray().tolist())
)
row_vectors = (
row_vectors if is_column_list else [r[0] for r in row_vectors]
)
row_string_vectors = [[str(f)[:6] for f in r] for r in row_vectors]
row_string_vectors = map(lambda r: r[:6], row_string_vectors)
row_string_vectors = [
", ".join(r) + ", ..." for r in row_string_vectors
]
readable_df[k] = row_string_vectors
elif col_type == VTAColumnType.FLOAT:
float_column = readable_df[k]
row_floats = [round(f, 5) for f in float_column]
readable_df[k] = row_floats
self.table_view = readable_df.values.tolist()
def update_dataview_column(
self, col_name: str, col_type: VTAColumnType, new_column: Any
):
self.data_view[col_name] = new_column
col = self.columns[col_name]
col.col_type = col_type
self.update_table_view()
task = {"view": "table", "type": "update_column"}
self.add_to_uiq(task)
self.checkpoint_texdf()
def create_dataview_column(
self, new_col_name: str, col_type: VTAColumnType, new_column: Any
):
self.data_view[new_col_name] = new_column
self.columns[new_col_name] = TexColumn(new_col_name, col_type)
task = {"view": "table", "type": "create_column"}
self.add_to_uiq(task)
self.update_table_view()
self.checkpoint_texdf()
# make sure that an aggregate is returning a data structure with the corresponding rows included
# b/c will use those to determine coordination
def add_metadata(
self, col_name: str, tag: str, md_type: VTAColumnType, md_value: Any
):
new_metadata = MetadataItem(tag, col_name, md_type, md_value)
col = self.columns[col_name]
col.metadata[tag] = new_metadata
task = {"view": "table", "type": "add_metadata"}
self.add_to_uiq(task)
# TODO: update table view to create presentable version of metadata???
self.checkpoint_texdf()
def add_to_uiq(self, task):
self.UI_QUEUE.append(task)
pickle.dump(self.UI_QUEUE, open("UI_QUEUE.pkl", "wb"))
def checkpoint_texdf(self):
name = self.dataset_name.split(".")[0]
dataframe_pkl_file = "/app/" + name + ".pkl"
pickle.dump(self, open(dataframe_pkl_file, "wb"))
| 37.840456 | 100 | 0.595844 | 12,568 | 0.946243 | 0 | 0 | 0 | 0 | 0 | 0 | 2,040 | 0.153591 |
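# A standalone sketch of the row format that get_columns_vega_format() produces for plain
# dataview columns: one dict per row keyed by column name, e.g. [{"x": 0.3}, {"x": 0.7}].
# Uses an ad-hoc DataFrame; it is not tied to the TexDF class or its Flask/pickle setup.
import pandas as pd
frame = pd.DataFrame({"review": ["great", "poor"], "score": [0.9, 0.2]})
vega_rows = [{c: row[c] for c in ["review", "score"]} for _, row in frame.iterrows()]
print(vega_rows)  # [{'review': 'great', 'score': 0.9}, {'review': 'poor', 'score': 0.2}]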
a0cc5ea31e6d19f7b084b456d80ccf0e5baf6865 | 1,604 | py | Python | orders-api/orders_api/models.py | kelvinducray/fastapi-orders-api | 37176329f717adf8ad8749be4ed50f7c875b0cf5 | [
"MIT"
]
| null | null | null | orders-api/orders_api/models.py | kelvinducray/fastapi-orders-api | 37176329f717adf8ad8749be4ed50f7c875b0cf5 | [
"MIT"
]
| null | null | null | orders-api/orders_api/models.py | kelvinducray/fastapi-orders-api | 37176329f717adf8ad8749be4ed50f7c875b0cf5 | [
"MIT"
]
| null | null | null | from uuid import uuid4
from sqlalchemy import Boolean, Column, DateTime, Integer, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from .database import Base
class Customers(Base):
__tablename__ = "customers"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
first_name = Column(String)
last_name = Column(String)
email = Column(String, unique=True)
class Orders(Base):
__tablename__ = "customer_orders"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
order_date = Column(DateTime)
customer_id = Column(Integer, unique=True)
product_id = Column(String)
is_active = Column(Boolean, default=True)
class Products(Base):
__tablename__ = "products"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
product_name = Column(String, unique=True)
product_price = Column(String)
is_active = Column(Boolean, default=True)
# class User(Base):
# __tablename__ = "users"
# id = Column(Integer, primary_key=True, index=True)
# email = Column(String, unique=True, index=True)
# hashed_password = Column(String)
# is_active = Column(Boolean, default=True)
# items = relationship("Item", back_populates="owner")
# class Item(Base):
# __tablename__ = "items"
# id = Column(Integer, primary_key=True, index=True)
# title = Column(String, index=True)
# description = Column(String, index=True)
# owner_id = Column(Integer, ForeignKey("users.id"))
# owner = relationship("User", back_populates="items")
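# Usage sketch (illustrative only): it assumes `.database` also exposes an
# `engine` and a `SessionLocal` session factory, which is a common
# FastAPI/SQLAlchemy layout but is not confirmed by this module.
#
# from .database import SessionLocal, engine
#
# Base.metadata.create_all(bind=engine)  # creates customers/customer_orders/products
# db = SessionLocal()
# db.add(Customers(first_name="Ada", last_name="Lovelace", email="ada@example.com"))
# db.commit()
# db.close()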
| 27.655172 | 68 | 0.704489 | 763 | 0.475686 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.400249 |
a0cc745e3a8e279006b132f30ea4111764df2ce1 | 32293 | py | Python | src/ID_meshes.py | faycalki/tainted-paths | 81cecf6c1fba903ec3b8043e22652d222892609d | ["MIT"] | 4 | 2019-09-26T21:34:32.000Z | 2021-11-18T19:31:15.000Z | src/ID_meshes.py | faycalki/tainted-paths | 81cecf6c1fba903ec3b8043e22652d222892609d | ["MIT"] | null | null | null | src/ID_meshes.py | faycalki/tainted-paths | 81cecf6c1fba903ec3b8043e22652d222892609d | ["MIT"] | null | null | null |
mesh_pic_bandits = 0
mesh_pic_mb_warrior_1 = 1
mesh_pic_messenger = 2
mesh_pic_prisoner_man = 3
mesh_pic_prisoner_fem = 4
mesh_pic_prisoner_wilderness = 5
mesh_pic_siege_sighted = 6
mesh_pic_siege_sighted_fem = 7
mesh_pic_camp = 8
mesh_pic_payment = 9
mesh_pic_escape_1 = 10
mesh_pic_escape_1_fem = 11
mesh_pic_victory = 12
mesh_pic_defeat = 13
mesh_pic_wounded = 14
mesh_pic_wounded_fem = 15
mesh_pic_steppe_bandits = 16
mesh_pic_mountain_bandits = 17
mesh_pic_sea_raiders = 18
mesh_pic_deserters = 19
mesh_pic_forest_bandits = 20
mesh_pic_cattle = 21
mesh_pic_looted_village = 22
mesh_pic_village_p = 23
mesh_pic_village_s = 24
mesh_pic_village_w = 25
mesh_pic_recruits = 26
mesh_pic_arms_swadian = 27
mesh_pic_castle1 = 28
mesh_pic_castledes = 29
mesh_pic_castlesnow = 30
mesh_pic_charge = 31
mesh_pic_khergit = 32
mesh_pic_nord = 33
mesh_pic_rhodock = 34
mesh_pic_sally_out = 35
mesh_pic_siege_attack = 36
mesh_pic_swad = 37
mesh_pic_town1 = 38
mesh_pic_towndes = 39
mesh_pic_townriot = 40
mesh_pic_townsnow = 41
mesh_pic_vaegir = 42
mesh_pic_villageriot = 43
mesh_pic_sarranid_encounter = 44
mesh_pic_mort = 45
mesh_mp_score_a = 46
mesh_mp_score_b = 47
mesh_portrait_blend_out = 48
mesh_load_window = 49
mesh_checkbox_off = 50
mesh_checkbox_on = 51
mesh_white_plane = 52
mesh_white_dot = 53
mesh_player_dot = 54
mesh_flag_infantry = 55
mesh_flag_archers = 56
mesh_flag_cavalry = 57
mesh_inv_slot = 58
mesh_mp_ingame_menu = 59
mesh_mp_inventory_left = 60
mesh_mp_inventory_right = 61
mesh_mp_inventory_choose = 62
mesh_mp_inventory_slot_glove = 63
mesh_mp_inventory_slot_horse = 64
mesh_mp_inventory_slot_armor = 65
mesh_mp_inventory_slot_helmet = 66
mesh_mp_inventory_slot_boot = 67
mesh_mp_inventory_slot_empty = 68
mesh_mp_inventory_slot_equip = 69
mesh_mp_inventory_left_arrow = 70
mesh_mp_inventory_right_arrow = 71
mesh_mp_ui_host_main = 72
mesh_mp_ui_host_maps_1 = 73
mesh_mp_ui_host_maps_2 = 74
mesh_mp_ui_host_maps_3 = 75
mesh_mp_ui_host_maps_4 = 76
mesh_mp_ui_host_maps_5 = 77
mesh_mp_ui_host_maps_6 = 78
mesh_mp_ui_host_maps_7 = 79
mesh_mp_ui_host_maps_8 = 80
mesh_mp_ui_host_maps_9 = 81
mesh_mp_ui_host_maps_10 = 82
mesh_mp_ui_host_maps_11 = 83
mesh_mp_ui_host_maps_12 = 84
mesh_mp_ui_host_maps_13 = 85
mesh_mp_ui_host_maps_randomp = 86
mesh_mp_ui_host_maps_randoms = 87
mesh_mp_ui_command_panel = 88
mesh_mp_ui_command_border_l = 89
mesh_mp_ui_command_border_r = 90
mesh_mp_ui_welcome_panel = 91
mesh_flag_project_sw = 92
mesh_flag_project_vg = 93
mesh_flag_project_kh = 94
mesh_flag_project_nd = 95
mesh_flag_project_rh = 96
mesh_flag_project_sr = 97
mesh_flag_projects_end = 98
mesh_flag_project_sw_miss = 99
mesh_flag_project_vg_miss = 100
mesh_flag_project_kh_miss = 101
mesh_flag_project_nd_miss = 102
mesh_flag_project_rh_miss = 103
mesh_flag_project_sr_miss = 104
mesh_flag_project_misses_end = 105
mesh_color_picker = 106
mesh_custom_map_banner_01 = 107
mesh_custom_map_banner_02 = 108
mesh_custom_map_banner_03 = 109
mesh_custom_banner_01 = 110
mesh_custom_banner_02 = 111
mesh_custom_banner_bg = 112
mesh_custom_banner_fg01 = 113
mesh_custom_banner_fg02 = 114
mesh_custom_banner_fg03 = 115
mesh_custom_banner_fg04 = 116
mesh_custom_banner_fg05 = 117
mesh_custom_banner_fg06 = 118
mesh_custom_banner_fg07 = 119
mesh_custom_banner_fg08 = 120
mesh_custom_banner_fg09 = 121
mesh_custom_banner_fg10 = 122
mesh_custom_banner_fg11 = 123
mesh_custom_banner_fg12 = 124
mesh_custom_banner_fg13 = 125
mesh_custom_banner_fg14 = 126
mesh_custom_banner_fg15 = 127
mesh_custom_banner_fg16 = 128
mesh_custom_banner_fg17 = 129
mesh_custom_banner_fg18 = 130
mesh_custom_banner_fg19 = 131
mesh_custom_banner_fg20 = 132
mesh_custom_banner_fg21 = 133
mesh_custom_banner_fg22 = 134
mesh_custom_banner_fg23 = 135
mesh_custom_banner_charge_01 = 136
mesh_custom_banner_charge_02 = 137
mesh_custom_banner_charge_03 = 138
mesh_custom_banner_charge_04 = 139
mesh_custom_banner_charge_05 = 140
mesh_custom_banner_charge_06 = 141
mesh_custom_banner_charge_07 = 142
mesh_custom_banner_charge_08 = 143
mesh_custom_banner_charge_09 = 144
mesh_custom_banner_charge_10 = 145
mesh_custom_banner_charge_11 = 146
mesh_custom_banner_charge_12 = 147
mesh_custom_banner_charge_13 = 148
mesh_custom_banner_charge_14 = 149
mesh_custom_banner_charge_15 = 150
mesh_custom_banner_charge_16 = 151
mesh_custom_banner_charge_17 = 152
mesh_custom_banner_charge_18 = 153
mesh_custom_banner_charge_19 = 154
mesh_custom_banner_charge_20 = 155
mesh_custom_banner_charge_21 = 156
mesh_custom_banner_charge_22 = 157
mesh_custom_banner_charge_23 = 158
mesh_custom_banner_charge_24 = 159
mesh_custom_banner_charge_25 = 160
mesh_custom_banner_charge_26 = 161
mesh_custom_banner_charge_27 = 162
mesh_custom_banner_charge_28 = 163
mesh_custom_banner_charge_29 = 164
mesh_custom_banner_charge_30 = 165
mesh_custom_banner_charge_31 = 166
mesh_custom_banner_charge_32 = 167
mesh_custom_banner_charge_33 = 168
mesh_custom_banner_charge_34 = 169
mesh_custom_banner_charge_35 = 170
mesh_custom_banner_charge_36 = 171
mesh_custom_banner_charge_37 = 172
mesh_custom_banner_charge_38 = 173
mesh_custom_banner_charge_39 = 174
mesh_custom_banner_charge_40 = 175
mesh_custom_banner_charge_41 = 176
mesh_custom_banner_charge_42 = 177
mesh_custom_banner_charge_43 = 178
mesh_custom_banner_charge_44 = 179
mesh_custom_banner_charge_45 = 180
mesh_custom_banner_charge_46 = 181
mesh_tableau_mesh_custom_banner = 182
mesh_tableau_mesh_custom_banner_square = 183
mesh_tableau_mesh_custom_banner_tall = 184
mesh_tableau_mesh_custom_banner_short = 185
mesh_tableau_mesh_shield_round_1 = 186
mesh_tableau_mesh_shield_round_2 = 187
mesh_tableau_mesh_shield_round_3 = 188
mesh_tableau_mesh_shield_round_4 = 189
mesh_tableau_mesh_shield_round_5 = 190
mesh_tableau_mesh_shield_small_round_1 = 191
mesh_tableau_mesh_shield_small_round_2 = 192
mesh_tableau_mesh_shield_small_round_3 = 193
mesh_tableau_mesh_shield_kite_1 = 194
mesh_tableau_mesh_shield_kite_2 = 195
mesh_tableau_mesh_shield_kite_3 = 196
mesh_tableau_mesh_shield_kite_4 = 197
mesh_tableau_mesh_shield_heater_1 = 198
mesh_tableau_mesh_shield_heater_2 = 199
mesh_tableau_mesh_shield_pavise_1 = 200
mesh_tableau_mesh_shield_pavise_2 = 201
mesh_heraldic_armor_bg = 202
mesh_tableau_mesh_heraldic_armor_a = 203
mesh_tableau_mesh_heraldic_armor_b = 204
mesh_tableau_mesh_heraldic_armor_c = 205
mesh_tableau_mesh_heraldic_armor_d = 206
mesh_outer_terrain_plain_1 = 207
mesh_banner_a01 = 208
mesh_banner_a02 = 209
mesh_banner_a03 = 210
mesh_banner_a04 = 211
mesh_banner_a05 = 212
mesh_banner_a06 = 213
mesh_banner_a07 = 214
mesh_banner_a08 = 215
mesh_banner_a09 = 216
mesh_banner_a10 = 217
mesh_banner_a11 = 218
mesh_banner_a12 = 219
mesh_banner_a13 = 220
mesh_banner_a14 = 221
mesh_banner_a15 = 222
mesh_banner_a16 = 223
mesh_banner_a17 = 224
mesh_banner_a18 = 225
mesh_banner_a19 = 226
mesh_banner_a20 = 227
mesh_banner_a21 = 228
mesh_banner_b01 = 229
mesh_banner_b02 = 230
mesh_banner_b03 = 231
mesh_banner_b04 = 232
mesh_banner_b05 = 233
mesh_banner_b06 = 234
mesh_banner_b07 = 235
mesh_banner_b08 = 236
mesh_banner_b09 = 237
mesh_banner_b10 = 238
mesh_banner_b11 = 239
mesh_banner_b12 = 240
mesh_banner_b13 = 241
mesh_banner_b14 = 242
mesh_banner_b15 = 243
mesh_banner_b16 = 244
mesh_banner_b17 = 245
mesh_banner_b18 = 246
mesh_banner_b19 = 247
mesh_banner_b20 = 248
mesh_banner_b21 = 249
mesh_banner_c01 = 250
mesh_banner_c02 = 251
mesh_banner_c03 = 252
mesh_banner_c04 = 253
mesh_banner_c05 = 254
mesh_banner_c06 = 255
mesh_banner_c07 = 256
mesh_banner_c08 = 257
mesh_banner_c09 = 258
mesh_banner_c10 = 259
mesh_banner_c11 = 260
mesh_banner_c12 = 261
mesh_banner_c13 = 262
mesh_banner_c14 = 263
mesh_banner_c15 = 264
mesh_banner_c16 = 265
mesh_banner_c17 = 266
mesh_banner_c18 = 267
mesh_banner_c19 = 268
mesh_banner_c20 = 269
mesh_banner_c21 = 270
mesh_banner_d01 = 271
mesh_banner_d02 = 272
mesh_banner_d03 = 273
mesh_banner_d04 = 274
mesh_banner_d05 = 275
mesh_banner_d06 = 276
mesh_banner_d07 = 277
mesh_banner_d08 = 278
mesh_banner_d09 = 279
mesh_banner_d10 = 280
mesh_banner_d11 = 281
mesh_banner_d12 = 282
mesh_banner_d13 = 283
mesh_banner_d14 = 284
mesh_banner_d15 = 285
mesh_banner_d16 = 286
mesh_banner_d17 = 287
mesh_banner_d18 = 288
mesh_banner_d19 = 289
mesh_banner_d20 = 290
mesh_banner_d21 = 291
mesh_banner_e01 = 292
mesh_banner_e02 = 293
mesh_banner_e03 = 294
mesh_banner_e04 = 295
mesh_banner_e05 = 296
mesh_banner_e06 = 297
mesh_banner_e07 = 298
mesh_banner_e08 = 299
mesh_banner_e09 = 300
mesh_banner_e10 = 301
mesh_banner_e11 = 302
mesh_banner_e12 = 303
mesh_banner_e13 = 304
mesh_banner_e14 = 305
mesh_banner_e15 = 306
mesh_banner_e16 = 307
mesh_banner_e17 = 308
mesh_banner_e18 = 309
mesh_banner_e19 = 310
mesh_banner_e20 = 311
mesh_banner_e21 = 312
mesh_banner_f01 = 313
mesh_banner_f02 = 314
mesh_banner_f03 = 315
mesh_banner_f04 = 316
mesh_banner_f05 = 317
mesh_banner_f06 = 318
mesh_banner_f07 = 319
mesh_banner_f08 = 320
mesh_banner_f09 = 321
mesh_banner_f10 = 322
mesh_banner_f11 = 323
mesh_banner_f12 = 324
mesh_banner_f13 = 325
mesh_banner_f14 = 326
mesh_banner_f15 = 327
mesh_banner_f16 = 328
mesh_banner_f17 = 329
mesh_banner_f18 = 330
mesh_banner_f19 = 331
mesh_banner_f20 = 332
mesh_banner_h01 = 333
mesh_banner_h02 = 334
mesh_banner_h03 = 335
mesh_banner_h04 = 336
mesh_banner_h05 = 337
mesh_banner_h06 = 338
mesh_banner_h07 = 339
mesh_banner_h08 = 340
mesh_banner_h09 = 341
mesh_banner_h10 = 342
mesh_banner_h11 = 343
mesh_banner_h12 = 344
mesh_banner_h13 = 345
mesh_banner_h14 = 346
mesh_banner_h15 = 347
mesh_banner_h16 = 348
mesh_banner_h17 = 349
mesh_banner_h18 = 350
mesh_banner_h19 = 351
mesh_banner_h20 = 352
mesh_banner_h21 = 353
mesh_banner_i01 = 354
mesh_banner_i02 = 355
mesh_banner_i03 = 356
mesh_banner_i04 = 357
mesh_banner_i05 = 358
mesh_banner_i06 = 359
mesh_banner_i07 = 360
mesh_banner_i08 = 361
mesh_banner_i09 = 362
mesh_banner_i10 = 363
mesh_banner_i11 = 364
mesh_banner_i12 = 365
mesh_banner_i13 = 366
mesh_banner_i14 = 367
mesh_banner_i15 = 368
mesh_banner_i16 = 369
mesh_banner_i17 = 370
mesh_banner_i18 = 371
mesh_banner_i19 = 372
mesh_banner_i20 = 373
mesh_banner_i21 = 374
mesh_banner_k01 = 375
mesh_banner_k02 = 376
mesh_banner_k03 = 377
mesh_banner_k04 = 378
mesh_banner_k05 = 379
mesh_banner_k06 = 380
mesh_banner_k07 = 381
mesh_banner_k08 = 382
mesh_banner_k09 = 383
mesh_banner_k10 = 384
mesh_banner_k11 = 385
mesh_banner_k12 = 386
mesh_banner_k13 = 387
mesh_banner_k14 = 388
mesh_banner_k15 = 389
mesh_banner_k16 = 390
mesh_banner_k17 = 391
mesh_banner_k18 = 392
mesh_banner_k19 = 393
mesh_banner_k20 = 394
mesh_banner_g01 = 395
mesh_banner_g02 = 396
mesh_banner_g03 = 397
mesh_banner_g04 = 398
mesh_banner_g05 = 399
mesh_banner_g06 = 400
mesh_banner_g07 = 401
mesh_banner_g08 = 402
mesh_banner_g09 = 403
mesh_banner_g10 = 404
mesh_banner_kingdom_a = 405
mesh_banner_kingdom_b = 406
mesh_banner_kingdom_c = 407
mesh_banner_kingdom_d = 408
mesh_banner_kingdom_e = 409
mesh_banner_kingdom_f = 410
mesh_banner_kingdom_g = 411
mesh_banner_kingdom_h = 412
mesh_banner_kingdom_i = 413
mesh_banner_kingdom_j = 414
mesh_banner_kingdom_k = 415
mesh_banner_kingdom_l = 416
mesh_banner_kingdom_ll = 417
mesh_banner_kingdom_m = 418
mesh_banner_kingdom_n = 419
mesh_banner_kingdom_o = 420
mesh_banner_kingdom_p = 421
mesh_banner_kingdom_q = 422
mesh_banner_kingdom_r = 423
mesh_banner_kingdom_s = 424
mesh_banner_kingdom_t = 425
mesh_banner_kingdom_u = 426
mesh_banner_kingdom_v = 427
mesh_banner_kingdom_w = 428
mesh_banner_kingdom_x = 429
mesh_banner_kingdom_y = 430
mesh_banner_kingdom_z = 431
mesh_banner_kingdom_2a = 432
mesh_banner_kingdom_2b = 433
mesh_banner_kingdom_2c = 434
mesh_banner_kingdom_2d = 435
mesh_banner_k21 = 436
mesh_arms_a01 = 437
mesh_arms_a02 = 438
mesh_arms_a03 = 439
mesh_arms_a04 = 440
mesh_arms_a05 = 441
mesh_arms_a06 = 442
mesh_arms_a07 = 443
mesh_arms_a08 = 444
mesh_arms_a09 = 445
mesh_arms_a10 = 446
mesh_arms_a11 = 447
mesh_arms_a12 = 448
mesh_arms_a13 = 449
mesh_arms_a14 = 450
mesh_arms_a15 = 451
mesh_arms_a16 = 452
mesh_arms_a17 = 453
mesh_arms_a18 = 454
mesh_arms_a19 = 455
mesh_arms_a20 = 456
mesh_arms_a21 = 457
mesh_arms_b01 = 458
mesh_arms_b02 = 459
mesh_arms_b03 = 460
mesh_arms_b04 = 461
mesh_arms_b05 = 462
mesh_arms_b06 = 463
mesh_arms_b07 = 464
mesh_arms_b08 = 465
mesh_arms_b09 = 466
mesh_arms_b10 = 467
mesh_arms_b11 = 468
mesh_arms_b12 = 469
mesh_arms_b13 = 470
mesh_arms_b14 = 471
mesh_arms_b15 = 472
mesh_arms_b16 = 473
mesh_arms_b17 = 474
mesh_arms_b18 = 475
mesh_arms_b19 = 476
mesh_arms_b20 = 477
mesh_arms_b21 = 478
mesh_arms_c01 = 479
mesh_arms_c02 = 480
mesh_arms_c03 = 481
mesh_arms_c04 = 482
mesh_arms_c05 = 483
mesh_arms_c06 = 484
mesh_arms_c07 = 485
mesh_arms_c08 = 486
mesh_arms_c09 = 487
mesh_arms_c10 = 488
mesh_arms_c11 = 489
mesh_arms_c12 = 490
mesh_arms_c13 = 491
mesh_arms_c14 = 492
mesh_arms_c15 = 493
mesh_arms_c16 = 494
mesh_arms_c17 = 495
mesh_arms_c18 = 496
mesh_arms_c19 = 497
mesh_arms_c20 = 498
mesh_arms_c21 = 499
mesh_arms_d01 = 500
mesh_arms_d02 = 501
mesh_arms_d03 = 502
mesh_arms_d04 = 503
mesh_arms_d05 = 504
mesh_arms_d06 = 505
mesh_arms_d07 = 506
mesh_arms_d08 = 507
mesh_arms_d09 = 508
mesh_arms_d10 = 509
mesh_arms_d11 = 510
mesh_arms_d12 = 511
mesh_arms_d13 = 512
mesh_arms_d14 = 513
mesh_arms_d15 = 514
mesh_arms_d16 = 515
mesh_arms_d17 = 516
mesh_arms_d18 = 517
mesh_arms_d19 = 518
mesh_arms_d20 = 519
mesh_arms_d21 = 520
mesh_arms_e01 = 521
mesh_arms_e02 = 522
mesh_arms_e03 = 523
mesh_arms_e04 = 524
mesh_arms_e05 = 525
mesh_arms_e06 = 526
mesh_arms_e07 = 527
mesh_arms_e08 = 528
mesh_arms_e09 = 529
mesh_arms_e10 = 530
mesh_arms_e11 = 531
mesh_arms_e12 = 532
mesh_arms_e13 = 533
mesh_arms_e14 = 534
mesh_arms_e15 = 535
mesh_arms_e16 = 536
mesh_arms_e17 = 537
mesh_arms_e18 = 538
mesh_arms_e19 = 539
mesh_arms_e20 = 540
mesh_arms_e21 = 541
mesh_arms_f01 = 542
mesh_arms_f02 = 543
mesh_arms_f03 = 544
mesh_arms_f04 = 545
mesh_arms_f05 = 546
mesh_arms_f06 = 547
mesh_arms_f07 = 548
mesh_arms_f08 = 549
mesh_arms_f09 = 550
mesh_arms_f10 = 551
mesh_arms_f11 = 552
mesh_arms_f12 = 553
mesh_arms_f13 = 554
mesh_arms_f14 = 555
mesh_arms_f15 = 556
mesh_arms_f16 = 557
mesh_arms_f17 = 558
mesh_arms_f18 = 559
mesh_arms_f19 = 560
mesh_arms_f20 = 561
mesh_arms_h01 = 562
mesh_arms_h02 = 563
mesh_arms_h03 = 564
mesh_arms_h04 = 565
mesh_arms_h05 = 566
mesh_arms_h06 = 567
mesh_arms_h07 = 568
mesh_arms_h08 = 569
mesh_arms_h09 = 570
mesh_arms_h10 = 571
mesh_arms_h11 = 572
mesh_arms_h12 = 573
mesh_arms_h13 = 574
mesh_arms_h14 = 575
mesh_arms_h15 = 576
mesh_arms_h16 = 577
mesh_arms_h17 = 578
mesh_arms_h18 = 579
mesh_arms_h19 = 580
mesh_arms_h20 = 581
mesh_arms_h21 = 582
mesh_arms_i01 = 583
mesh_arms_i02 = 584
mesh_arms_i03 = 585
mesh_arms_i04 = 586
mesh_arms_i05 = 587
mesh_arms_i06 = 588
mesh_arms_i07 = 589
mesh_arms_i08 = 590
mesh_arms_i09 = 591
mesh_arms_i10 = 592
mesh_arms_i11 = 593
mesh_arms_i12 = 594
mesh_arms_i13 = 595
mesh_arms_i14 = 596
mesh_arms_i15 = 597
mesh_arms_i16 = 598
mesh_arms_i17 = 599
mesh_arms_i18 = 600
mesh_arms_i19 = 601
mesh_arms_i20 = 602
mesh_arms_i21 = 603
mesh_arms_k01 = 604
mesh_arms_k02 = 605
mesh_arms_k03 = 606
mesh_arms_k04 = 607
mesh_arms_k05 = 608
mesh_arms_k06 = 609
mesh_arms_k07 = 610
mesh_arms_k08 = 611
mesh_arms_k09 = 612
mesh_arms_k10 = 613
mesh_arms_k11 = 614
mesh_arms_k12 = 615
mesh_arms_k13 = 616
mesh_arms_k14 = 617
mesh_arms_k15 = 618
mesh_arms_k16 = 619
mesh_arms_k17 = 620
mesh_arms_k18 = 621
mesh_arms_k19 = 622
mesh_arms_k20 = 623
mesh_arms_g01 = 624
mesh_arms_g02 = 625
mesh_arms_g03 = 626
mesh_arms_g04 = 627
mesh_arms_g05 = 628
mesh_arms_g06 = 629
mesh_arms_g07 = 630
mesh_arms_g08 = 631
mesh_arms_g09 = 632
mesh_arms_g10 = 633
mesh_arms_kingdom_a = 634
mesh_arms_kingdom_b = 635
mesh_arms_kingdom_c = 636
mesh_arms_kingdom_d = 637
mesh_arms_kingdom_e = 638
mesh_arms_kingdom_f = 639
mesh_arms_kingdom_g = 640
mesh_arms_kingdom_h = 641
mesh_arms_kingdom_i = 642
mesh_arms_kingdom_j = 643
mesh_arms_kingdom_k = 644
mesh_arms_kingdom_l = 645
mesh_arms_kingdom_ll = 646
mesh_arms_kingdom_m = 647
mesh_arms_kingdom_n = 648
mesh_arms_kingdom_o = 649
mesh_arms_kingdom_p = 650
mesh_arms_kingdom_q = 651
mesh_arms_kingdom_r = 652
mesh_arms_kingdom_s = 653
mesh_arms_kingdom_t = 654
mesh_arms_kingdom_u = 655
mesh_arms_kingdom_v = 656
mesh_arms_kingdom_w = 657
mesh_arms_kingdom_x = 658
mesh_arms_kingdom_y = 659
mesh_arms_kingdom_z = 660
mesh_arms_kingdom_2a = 661
mesh_arms_kingdom_2b = 662
mesh_arms_kingdom_2c = 663
mesh_arms_kingdom_2d = 664
mesh_arms_k21 = 665
mesh_banners_default_a = 666
mesh_banners_default_b = 667
mesh_banners_default_c = 668
mesh_banners_default_d = 669
mesh_banners_default_e = 670
mesh_troop_label_banner = 671
mesh_ui_kingdom_shield_1 = 672
mesh_ui_kingdom_shield_2 = 673
mesh_ui_kingdom_shield_3 = 674
mesh_ui_kingdom_shield_4 = 675
mesh_ui_kingdom_shield_5 = 676
mesh_ui_kingdom_shield_6 = 677
mesh_ui_kingdom_shield_7 = 678
mesh_ui_kingdom_shield_8 = 679
mesh_ui_kingdom_shield_9 = 680
mesh_ui_kingdom_shield_10 = 681
mesh_ui_kingdom_shield_11 = 682
mesh_ui_kingdom_shield_12 = 683
mesh_ui_kingdom_shield_13 = 684
mesh_ui_kingdom_shield_14 = 685
mesh_ui_kingdom_shield_15 = 686
mesh_ui_kingdom_shield_16 = 687
mesh_ui_kingdom_shield_17 = 688
mesh_ui_kingdom_shield_18 = 689
mesh_ui_kingdom_shield_19 = 690
mesh_ui_kingdom_shield_20 = 691
mesh_ui_kingdom_shield_21 = 692
mesh_ui_kingdom_shield_22 = 693
mesh_ui_kingdom_shield_23 = 694
mesh_ui_kingdom_shield_24 = 695
mesh_ui_kingdom_shield_25 = 696
mesh_ui_kingdom_shield_26 = 697
mesh_ui_kingdom_shield_27 = 698
mesh_ui_kingdom_shield_28 = 699
mesh_ui_kingdom_shield_29 = 700
mesh_ui_kingdom_shield_30 = 701
mesh_ui_kingdom_shield_31 = 702
mesh_mouse_arrow_down = 703
mesh_mouse_arrow_right = 704
mesh_mouse_arrow_left = 705
mesh_mouse_arrow_up = 706
mesh_mouse_arrow_plus = 707
mesh_mouse_left_click = 708
mesh_mouse_right_click = 709
mesh_status_ammo_ready = 710
mesh_main_menu_background = 711
mesh_loading_background = 712
mesh_ui_quick_battle_a = 713
mesh_white_bg_plane_a = 714
mesh_cb_ui_icon_infantry = 715
mesh_cb_ui_icon_archer = 716
mesh_cb_ui_icon_horseman = 717
mesh_cb_ui_main = 718
mesh_cb_ui_maps_scene_01 = 719
mesh_cb_ui_maps_scene_02 = 720
mesh_cb_ui_maps_scene_03 = 721
mesh_cb_ui_maps_scene_04 = 722
mesh_cb_ui_maps_scene_05 = 723
mesh_cb_ui_maps_scene_06 = 724
mesh_cb_ui_maps_scene_07 = 725
mesh_cb_ui_maps_scene_08 = 726
mesh_cb_ui_maps_scene_09 = 727
mesh_mp_ui_host_maps_14 = 728
mesh_mp_ui_host_maps_15 = 729
mesh_ui_kingdom_shield_7 = 730
mesh_flag_project_rb = 731
mesh_flag_project_rb_miss = 732
mesh_mp_ui_host_maps_16 = 733
mesh_mp_ui_host_maps_17 = 734
mesh_mp_ui_host_maps_18 = 735
mesh_mp_ui_host_maps_19 = 736
mesh_mp_ui_host_maps_20 = 737
mesh_pic_mb_warrior_2 = 738
mesh_pic_mb_warrior_3 = 739
mesh_pic_mb_warrior_4 = 740
mesh_pic_mercenary = 741
mesh_facegen_board = 742
mesh_status_background = 743
mesh_status_health_bar = 744
mesh_game_log_window = 745
mesh_restore_game_panel = 746
mesh_message_window = 747
mesh_party_window_b = 748
mesh_party_member_button = 749
mesh_party_member_button_pressed = 750
mesh_longer_button = 751
mesh_longer_button_down = 752
mesh_button_1 = 753
mesh_button_1_down = 754
mesh_used_button = 755
mesh_used_button_down = 756
mesh_longer_button = 757
mesh_longer_button_down = 758
mesh_options_window = 759
mesh_message_window = 760
mesh_note_window = 761
mesh_left_button = 762
mesh_left_button_down = 763
mesh_left_button_hl = 764
mesh_right_button = 765
mesh_right_button_down = 766
mesh_right_button_hl = 767
mesh_center_button = 768
mesh_drop_button = 769
mesh_drop_button_down = 770
mesh_drop_button_hl = 771
mesh_drop_button_child = 772
mesh_drop_button_child_down = 773
mesh_drop_button_child_hl = 774
mesh_num_1 = 775
mesh_num_2 = 776
mesh_num_3 = 777
mesh_num_4 = 778
mesh_num_5 = 779
mesh_num_6 = 780
mesh_num_7 = 781
mesh_num_8 = 782
mesh_num_9 = 783
mesh_num_10 = 784
mesh_num_11 = 785
mesh_num_12 = 786
mesh_num_13 = 787
mesh_num_14 = 788
mesh_num_15 = 789
mesh_num_16 = 790
mesh_num_17 = 791
mesh_num_18 = 792
mesh_num_19 = 793
mesh_num_20 = 794
mesh_num_21 = 795
mesh_num_22 = 796
mesh_num_23 = 797
mesh_num_24 = 798
mesh_num_25 = 799
mesh_num_26 = 800
mesh_num_27 = 801
mesh_num_28 = 802
mesh_num_29 = 803
mesh_num_30 = 804
mesh_num_31 = 805
mesh_num_32 = 806
mesh_num_33 = 807
mesh_num_34 = 808
mesh_num_35 = 809
mesh_num_36 = 810
mesh_num_37 = 811
mesh_num_38 = 812
mesh_num_39 = 813
mesh_num_40 = 814
mesh_num_41 = 815
mesh_num_42 = 816
mesh_num_43 = 817
mesh_num_44 = 818
mesh_num_45 = 819
mesh_num_46 = 820
mesh_num_47 = 821
mesh_num_48 = 822
mesh_message_window = 823
mesh_face_gen_window = 824
mesh_order_frame = 825
mesh_tableau_mesh_early_transitional_heraldic_banner = 826
mesh_tableau_mesh_early_transitional_heraldic = 827
mesh_tableau_mesh_samurai_heraldic_flag = 828
mesh_tableau_mesh_banner_spear = 829
mesh_invisi_st_plane_fullsc = 830
mesh_bt_flag_1 = 831
mesh_bt_flag_2 = 832
mesh_bt_flag_3 = 833
mesh_pic_bt_crossbow = 834
mesh_pic_bt_shield = 835
mesh_pic_bt_horse_archer = 836
mesh_pic_bt_twohand = 837
mesh_pic_bt_bow = 838
mesh_pic_bt_horse = 839
mesh_pic_bt_musket = 840
mesh_pic_bt_leader = 841
mesh_bt_cion_tier1 = 842
mesh_bt_cion_tier2 = 843
mesh_bt_cion_tier3 = 844
mesh_bt_cion_tier4 = 845
mesh_bt_cion_tier5 = 846
mesh_bt_cion_tier6 = 847
mesh_pic_bt_charge_auto = 848
mesh_pic_bt_hold = 849
mesh_pic_bt_followme = 850
mesh_pic_bt_unite = 851
mesh_pic_bt_divide = 852
mesh_pic_bt_advan = 853
mesh_pic_bt_fall = 854
mesh_pic_bt_holdfire = 855
mesh_pic_bt_anyw = 856
mesh_pic_bt_clicked = 857
mesh_pic_bt_return = 858
mesh_pic_camp_meet = 859
mesh_pic_meetlady = 860
mesh_pic_meetlady2 = 861
mesh_pic_meetlady3 = 862
mesh_1pic_ruin_0 = 863
mesh_1pic_ruin_1 = 864
mesh_1pic_ruin_2 = 865
mesh_1pic_ruin_3 = 866
mesh_1pic_ruin_4 = 867
mesh_1pic_ruin_5 = 868
mesh_1pic_ruin_6 = 869
mesh_1pic_ruin_7 = 870
mesh_1pic_ruin_8 = 871
mesh_1pic_ruin_9 = 872
mesh_1pic_ruin_10 = 873
mesh_1pic_ruin_11 = 874
mesh_1pic_ruin_12 = 875
mesh_1pic_ruin_13 = 876
mesh_1pic_ruin_14 = 877
mesh_1pic_ruin_15 = 878
mesh_1pic_ruin_16 = 879
mesh_1pic_ruin_17 = 880
mesh_1pic_ruin_18 = 881
mesh_1pic_ruin_19 = 882
mesh_1pic_ruin_20 = 883
mesh_1pic_ruin_21 = 884
mesh_1pic_ruin_22 = 885
mesh_1pic_ruin_23 = 886
mesh_1pic_ruin_24 = 887
mesh_1pic_ruin_25 = 888
mesh_1pic_ruin_26 = 889
mesh_1pic_ruin_27 = 890
mesh_1pic_ruin_28 = 891
mesh_1pic_ruin_29 = 892
mesh_1pic_ruin_30 = 893
mesh_1pic_ruin_31 = 894
mesh_1pic_ruin_32 = 895
mesh_1pic_ruin_33 = 896
mesh_1pic_ruin_34 = 897
mesh_1pic_ruin_35 = 898
mesh_1pic_ruin_36 = 899
mesh_1pic_ruin_37 = 900
mesh_1pic_ruin_38 = 901
mesh_1pic_ruin_39 = 902
mesh_1pic_ruin_40 = 903
mesh_1pic_ruin_41 = 904
mesh_1pic_ruin_42 = 905
mesh_1pic_ruin_43 = 906
mesh_1pic_ruin_44 = 907
mesh_1pic_ruin_45 = 908
mesh_1pic_ruin_46 = 909
mesh_1pic_ruin_47 = 910
mesh_1pic_ruin_48 = 911
mesh_1pic_ruin_49 = 912
mesh_1pic_ruin_50 = 913
mesh_1pic_ruin_51 = 914
mesh_1pic_ruin_52 = 915
mesh_1pic_ruin_53 = 916
mesh_1pic_ruin_54 = 917
mesh_1pic_ruin_55 = 918
mesh_1pic_ruin_56 = 919
mesh_1pic_ruin_57 = 920
mesh_1pic_ruin_58 = 921
mesh_1pic_ruin_59 = 922
mesh_1pic_ruin_60 = 923
mesh_1pic_ruin_61 = 924
mesh_1pic_ruin_62 = 925
mesh_1pic_ruin_63 = 926
mesh_1pic_ruin_64 = 927
mesh_1pic_ruin_65 = 928
mesh_1pic_ruin_66 = 929
mesh_1pic_ruin_67 = 930
mesh_1pic_ruin_68 = 931
mesh_1pic_ruin_69 = 932
mesh_1pic_ruin_70 = 933
mesh_1pic_ruin_71 = 934
mesh_1pic_ruin_72 = 935
mesh_1pic_ruin_73 = 936
mesh_1pic_ruin_74 = 937
mesh_1pic_ruin_75 = 938
mesh_1pic_ruin_76 = 939
mesh_1pic_ruin_77 = 940
mesh_1pic_ruin_78 = 941
mesh_1pic_ruin_79 = 942
mesh_1pic_ruin_80 = 943
mesh_1pic_ruin_81 = 944
mesh_1pic_ruin_82 = 945
mesh_1pic_ruin_83 = 946
mesh_1pic_ruin_84 = 947
mesh_1pic_ruin_85 = 948
mesh_1pic_ruin_86 = 949
mesh_1pic_ruin_87 = 950
mesh_1pic_ruin_88 = 951
mesh_1pic_ruin_89 = 952
mesh_1pic_ruin_90 = 953
mesh_1pic_ruin_91 = 954
mesh_1pic_ruin_92 = 955
mesh_1pic_ruin_93 = 956
mesh_1pic_ruin_94 = 957
mesh_1pic_ruin_95 = 958
mesh_1pic_ruin_96 = 959
mesh_1pic_ruin_97 = 960
mesh_1pic_ruin_98 = 961
mesh_1pic_ruin_99 = 962
mesh_1pic_ruin_100 = 963
mesh_1pic_ruin_101 = 964
mesh_1pic_ruin_102 = 965
mesh_1pic_ruin_103 = 966
mesh_1pic_ruin_104 = 967
mesh_1pic_ruin_105 = 968
mesh_1pic_ruin_106 = 969
mesh_1pic_ruin_107 = 970
mesh_1pic_ruin_108 = 971
mesh_1pic_ruin_109 = 972
mesh_1pic_ruin_110 = 973
mesh_1pic_ruin_111 = 974
mesh_1pic_ruin_112 = 975
mesh_1pic_ruin_113 = 976
mesh_1pic_ruin_114 = 977
mesh_1pic_ruin_115 = 978
mesh_1pic_ruin_116 = 979
mesh_1pic_ruin_117 = 980
mesh_1pic_ruin_118 = 981
mesh_1pic_ruin_119 = 982
mesh_1pic_ruin_120 = 983
mesh_1pic_ruin_121 = 984
mesh_1pic_ruin_122 = 985
mesh_1pic_ruin_123 = 986
mesh_1pic_ruin_124 = 987
mesh_1pic_ruin_125 = 988
mesh_1pic_ruin_126 = 989
mesh_1pic_ruin_127 = 990
mesh_1pic_ruin_128 = 991
mesh_1pic_ruin_129 = 992
mesh_1pic_ruin_130 = 993
mesh_1pic_ruin_131 = 994
mesh_1pic_ruin_132 = 995
mesh_1pic_ruin_133 = 996
mesh_1pic_ruin_134 = 997
mesh_1pic_ruin_135 = 998
mesh_1pic_ruin_136 = 999
mesh_1pic_ruin_137 = 1000
mesh_1pic_ruin_138 = 1001
mesh_1pic_ruin_139 = 1002
mesh_1pic_ruin_140 = 1003
mesh_1pic_ruin_141 = 1004
mesh_1pic_ruin_142 = 1005
mesh_1pic_ruin_143 = 1006
mesh_1pic_ruin_144 = 1007
mesh_1pic_ruin_145 = 1008
mesh_1pic_ruin_146 = 1009
mesh_1pic_ruin_ex1 = 1010
mesh_1pic_ruin_ex2 = 1011
mesh_1pic_ruin_ex3 = 1012
mesh_1pic_ruin_ex4 = 1013
mesh_1pic_ruin_ex5 = 1014
mesh_1pic_ruin_ex6 = 1015
mesh_1pic_ruin_ex7 = 1016
mesh_1pic_ruin_ex8 = 1017
mesh_1pic_ruin_ex9 = 1018
mesh_1pic_ruin_ex10 = 1019
mesh_1pic_ruin_ex11 = 1020
mesh_1pic_ruin_ex12 = 1021
mesh_1pic_ruin_ex13 = 1022
mesh_1pic_ruin_ex14 = 1023
mesh_1pic_ruin_ex15 = 1024
mesh_1pic_ruin_ex16 = 1025
mesh_1pic_ruin_ex17 = 1026
mesh_1pic_ruin_ex18 = 1027
mesh_1pic_ruin_ex19 = 1028
mesh_1pic_ruin_ex20 = 1029
mesh_1pic_ruin_ex21 = 1030
mesh_1pic_ruin_ex22 = 1031
mesh_1pic_ruin_ex23 = 1032
mesh_1pic_ruin_ex24 = 1033
mesh_1pic_ruin_ex25 = 1034
mesh_pic_encounter1 = 1035
mesh_pic_encounter2 = 1036
mesh_pic_encounter3 = 1037
mesh_pic_xex8 = 1038
mesh_pic_xex9 = 1039
mesh_pic_xex10 = 1040
mesh_pic_xex11 = 1041
mesh_pic_xex12 = 1042
mesh_pic_xex13 = 1043
mesh_pic_xex14 = 1044
mesh_st_tercio = 1045
mesh_st_pincer_movement = 1046
mesh_encounter4vik = 1047
mesh_encounter5pirate = 1048
mesh_pic_ship_shipyard = 1049
mesh_st_pic_plain = 1050
mesh_st_pic_desert = 1051
mesh_st_pic_mount = 1052
mesh_st_pic_snow = 1053
mesh_st_pic_sea = 1054
mesh_st_lancecharge = 1055
mesh_st_ccccharge = 1056
mesh_st_viking = 1057
mesh_black_st_plane = 1058
mesh_invisi_st_plane = 1059
mesh_pic_invisi_backgrounds = 1060
mesh_pic_policy_choose_prt = 1061
mesh_pic_policy_choose_prt_bk = 1062
mesh_pic_religion_screenn = 1063
mesh_pic_gbt_punch = 1064
mesh_pic_gbt_lick = 1065
mesh_pic_gbt_finger = 1066
mesh_pic_gbt_love = 1067
mesh_pic_gbt_place = 1068
mesh_pic_gbt_bed_sheet = 1069
mesh_pic_money_bag = 1070
mesh_pic_sea_backg = 1071
mesh_tableau_mesh_flag = 1072
mesh_pic_backg_inv = 1073
mesh_pic_library = 1074
mesh_pic_fuck_back = 1075
mesh_pic_ghost_ship_encount = 1076
mesh_pic_visit_train = 1077
mesh_pic_weknow = 1078
mesh_pic_bank_back = 1079
mesh_pic_wm_blank = 1080
mesh_pic_wm_horse = 1081
mesh_pic_wm_finewood = 1082
mesh_pic_wm_iron = 1083
mesh_pic_wm_elephant = 1084
mesh_pic_wm_whale = 1085
mesh_pic_wm_fish = 1086
mesh_pic_wm_maize = 1087
mesh_pic_wm_copper = 1088
mesh_pic_wm_marble = 1089
mesh_pic_wm_pearl = 1090
mesh_pic_wm_gem = 1091
mesh_pic_wm_ceramic = 1092
mesh_pic_wm_gold = 1093
mesh_pic_wm_silver = 1094
mesh_pic_wm_ivory = 1095
mesh_pic_wm_coffee = 1096
mesh_pic_wm_cacao = 1097
mesh_pic_wm_silk = 1098
mesh_pic_wm_nutmeg = 1099
mesh_pic_wm_allspice = 1100
mesh_pic_wm_cinnamon = 1101
mesh_pic_wm_clove = 1102
mesh_pic_wm_pepper = 1103
mesh_pic_wm_tabaco = 1104
mesh_pic_wm_tea = 1105
mesh_pic_marry = 1106
mesh_pic_religion_symbol_0 = 1107
mesh_pic_religion_symbol_1 = 1108
mesh_pic_religion_symbol_2 = 1109
mesh_pic_religion_symbol_3 = 1110
mesh_pic_religion_symbol_4 = 1111
mesh_pic_religion_symbol_5 = 1112
mesh_pic_religion_symbol_6 = 1113
mesh_pic_religion_symbol_7 = 1114
mesh_pic_religion_symbol_8 = 1115
mesh_pic_religion_symbol_9 = 1116
mesh_pic_religion_symbol_10 = 1117
mesh_pic_religion_symbol_11 = 1118
mesh_pic_religion_symbol_12 = 1119
mesh_pic_religion_symbol_13 = 1120
mesh_pic_religion_symbol_14 = 1121
mesh_pic_religion_symbol_15 = 1122
mesh_pic_religion_symbol_16 = 1123
mesh_pic_disaster_volcano = 1124
mesh_pic_disaster_earthquake = 1125
mesh_pic_disaster_storm = 1126
mesh_pic_disaster_typhoon = 1127
mesh_pic_disaster_fire = 1128
mesh_pic_disaster_sand = 1129
mesh_pic_disaster_tides = 1130
mesh_pic_disaster_ice = 1131
mesh_pic_disaster_flood = 1132
mesh_flag_div_1 = 1133
mesh_flag_div_2 = 1134
mesh_flag_div_3 = 1135
mesh_flag_div_4 = 1136
mesh_flag_div_5 = 1137
mesh_flag_div_6 = 1138
mesh_flag_div_7 = 1139
mesh_flag_div_8 = 1140
mesh_flag_div_9 = 1141
mesh_pic_battle_tile_2 = 1142
mesh_pic_battle_tile_3 = 1143
mesh_pic_battle_tile_4 = 1144
mesh_pic_battle_tile_5 = 1145
mesh_pic_battle_tile_6 = 1146
mesh_pic_battle_tile_7 = 1147
mesh_pic_battle_tile_8 = 1148
mesh_pic_battle_tile_9 = 1149
mesh_pic_battle_tile_10 = 1150
mesh_pic_battle_tile_11 = 1151
mesh_pic_battle_tile_s1 = 1152
mesh_pic_battle_tile_s2 = 1153
mesh_pic_battle_tile_s3 = 1154
mesh_pic_battle_tile_s4 = 1155
mesh_pic_battle_tile_n1 = 1156
mesh_pic_gameover = 1157
mesh_pic_cla_mercernary = 1158
mesh_pic_cla_merchant = 1159
mesh_pic_cla_adventurer = 1160
mesh_pic_cla_lord = 1161
mesh_pic_cla_bandit = 1162
mesh_pic_cla_pirate = 1163
mesh_pic_ptown_euro = 1164
mesh_pic_ptown_snow = 1165
mesh_pic_ptown_roman = 1166
mesh_pic_ptown_arab = 1167
mesh_pic_ptown_wooden = 1168
mesh_pic_ptown_asia = 1169
mesh_pic_ptown_asia_2 = 1170
mesh_pic_ptown_jap = 1171
mesh_pic_ptown_uurt = 1172
mesh_pic_ptown_teepee = 1173
mesh_pic_meetlady4 = 1174
mesh_pic_battle_formation_backriver = 1175
mesh_pic_battle_formation_sideattack = 1176
mesh_pic_battle_formation_backattack = 1177
mesh_pic_battle_formation_8door = 1178
mesh_pic_battle_formation_encampment = 1179
mesh_pic_battle_formation_lionheart = 1180
mesh_pic_battle_formation_mangudai = 1181
mesh_pic_battle_formation_pincer = 1182
mesh_pic_battle_formation_base = 1183
mesh_OrteliusWorldMap1570 = 1184
mesh_pic_portrait_yoritomo = 1185
mesh_pic_portrait_munemori = 1186
mesh_pic_portrait_xiaozong = 1187
mesh_pic_portrait_shizong = 1188
mesh_pic_portrait_genghiskhan = 1189
mesh_pic_portrait_philip_ii = 1190
mesh_pic_portrait_richard_i = 1191
mesh_pic_portrait_barbarossa = 1192
mesh_pic_portrait_alfonso_viii = 1193
mesh_pic_portrait_yaqub = 1194
mesh_pic_portrait_baldwin = 1195
mesh_pic_portrait_saladin = 1196
mesh_pic_portrait_tekish = 1197
mesh_pic_portrait_ghiyath = 1198
mesh_pic_portrait_akbar = 1199
mesh_pic_portrait_ivan = 1200
mesh_pic_portrait_frederick_ii = 1201
mesh_pic_portrait_maxi = 1202
mesh_pic_portrait_john_iii = 1203
mesh_pic_portrait_selimii = 1204
mesh_pic_portrait_stephen = 1205
mesh_pic_portrait_elizabeth = 1206
mesh_pic_portrait_philip = 1207
mesh_pic_portrait_sebastian = 1208
mesh_pic_portrait_william = 1209
mesh_pic_portrait_wanli = 1210
mesh_pic_portrait_oda = 1211
mesh_town_t_plain = 1212
mesh_town_t_water = 1213
mesh_town_t_hill = 1214
mesh_town_t_desert = 1215
mesh_town_t_snow = 1216
mesh_town_t_mountain = 1217
mesh_town_t_mil = 1218
mesh_town_t_ore = 1219
mesh_town_t_horse = 1220
mesh_town_t_holy = 1221
mesh_town_t_pasture = 1222
mesh_town_t_mine = 1223
mesh_town_t_market = 1224
mesh_town_t_barrack = 1225
mesh_town_t_farm = 1226
mesh_town_t_hall = 1227
mesh_town_t_prison = 1228
mesh_town_t_library = 1229
mesh_town_t_temple = 1230
mesh_town_t_smithy = 1231
mesh_white_plane_upper = 1232
mesh_white_plane_center = 1233
mesh_town_e_onehand = 1234
mesh_town_e_twohand = 1235
mesh_town_e_polearm = 1236
mesh_town_e_bow = 1237
mesh_town_e_crossbow = 1238
mesh_town_e_arquebus = 1239
mesh_town_e_ammo = 1240
mesh_town_e_light = 1241
mesh_town_e_heavy = 1242
mesh_town_e_horse = 1243
mesh_town_e_siege = 1244
mesh_town_e_wood = 1245
mesh_town_e_shipammo = 1246
mesh_town_d_onehand = 1247
mesh_town_d_twohand = 1248
mesh_town_d_polearm = 1249
mesh_town_d_bow = 1250
mesh_town_d_crossbow = 1251
mesh_town_d_arquebus = 1252
mesh_town_d_ammo = 1253
mesh_town_d_light = 1254
mesh_town_d_heavy = 1255
mesh_town_d_horse = 1256
mesh_town_d_siege = 1257
mesh_town_d_wood = 1258
mesh_town_d_shipammo = 1259
mesh_status_troop_ratio_bar = 1260
mesh_status_troop_ratio_bar_button = 1261
| 25.528063 | 58 | 0.843619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a0cc84ea1f11da3af87cb6aff03136b234f94184 | 30936 | py | Python | q2_longitudinal/_vega.py | thermokarst/q2-longitudinal | 1967617214417b7097ce96e4a7dfdfbb5fd17faf | ["BSD-3-Clause"] | null | null | null | q2_longitudinal/_vega.py | thermokarst/q2-longitudinal | 1967617214417b7097ce96e4a7dfdfbb5fd17faf | ["BSD-3-Clause"] | null | null | null | q2_longitudinal/_vega.py | thermokarst/q2-longitudinal | 1967617214417b7097ce96e4a7dfdfbb5fd17faf | ["BSD-3-Clause"] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import json
import pandas as pd
def _render_volatility_spec(data: pd.DataFrame, individual_id: str, state: str,
default_group: str, group_columns: list,
default_metric: str, metric_columns: list,
yscale: str) -> str:
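    """Build the Vega v3 spec for the volatility chart and return it as a JSON string.
    `state`, the group columns, and the metric columns are expected to be columns of
    `data`; when `individual_id` is empty the per-subject "spaghetti" layer is omitted,
    and `yscale` is passed through as the y-axis scale type.
    """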
# Double-quotes for many of the strings below, just so that we don't have
# to escape the single quotes - it is already hard enough to read when
# wrapped.
opacity_test = ("!length(data('selected')) || "
"indata('selected', 'value', datum.value)")
group_test = ("!length(data('selected')) || "
"indata('selected', 'value', datum.groupByVal)")
error_bar_test = 'showErrorBars && (%s)' % group_test
metric_signal = {'signal': 'metric'}
group_signal = {'signal': 'grouper'}
# This looks grosser than it is (you can't do variable assignment in a
# vega expr, so no temp helper vars) - basically find the min and max
# extents of the metric in question for the y-axis rendering, including
# the 3x stdev (depending on the spread this could be beyond the metric's
# own limits.
domain_expr = ("[min(data('globalVals')[0].cl0,"
"data('globalVals')[0].minY),"
"max(data('globalVals')[0].cl3,"
"data('globalVals')[0].maxY)]")
# These templates customize the tooltips
mean_signal = ('{"title": "group mean", "group": datum.groupByVal,'
' "state": datum["%s"], "count": datum.count,'
' "mean": datum.mean, "ci0": datum.ci0, "ci1": datum.ci1}'
% state)
marks = [
{
'type': 'rule',
'from': {
'data': 'globalVals',
},
'encode': {
'update': {
'strokeWidth': {
'value': 2,
},
'x': {
'scale': 'x',
'field': 'minX',
},
'x2': {
'scale': 'x',
'field': 'maxX',
},
'y': {
'scale': 'y',
'field': 'mean',
},
'strokeOpacity': [
{
'test': 'showGlobalMean',
'value': 1.0,
},
{
'value': 0.0,
},
],
},
},
},
{
'type': 'rule',
'from': {
'data': 'globalVals',
},
'encode': {
'update': {
'strokeWidth': {
'value': 2,
},
'strokeDash': {
'value': [8, 8],
},
'x': {
'scale': 'x',
'field': 'minX',
},
'x2': {
'scale': 'x',
'field': 'maxX',
},
'y': {
'scale': 'y',
'field': 'cl0',
},
'strokeOpacity': [
{
'test': 'showGlobalControlLimits',
'value': 1.0,
},
{
'value': 0.0,
},
],
},
},
},
{
'type': 'rule',
'from': {
'data': 'globalVals',
},
'encode': {
'update': {
'strokeWidth': {
'value': 2,
},
'strokeDash': {
'value': [6, 2],
},
'x': {
'scale': 'x',
'field': 'minX',
},
'x2': {
'scale': 'x',
'field': 'maxX',
},
'y': {
'scale': 'y',
'field': 'cl1',
},
'strokeOpacity': [
{
'test': 'showGlobalControlLimits',
'value': 1.0,
},
{
'value': 0.0,
},
],
},
},
},
{
'type': 'rule',
'from': {
'data': 'globalVals',
},
'encode': {
'update': {
'strokeWidth': {
'value': 2,
},
'strokeDash': {
'value': [6, 2],
},
'x': {
'scale': 'x',
'field': 'minX',
},
'x2': {
'scale': 'x',
'field': 'maxX',
},
'y': {
'scale': 'y',
'field': 'cl2',
},
'strokeOpacity': [
{
'test': 'showGlobalControlLimits',
'value': 1.0,
},
{
'value': 0.0,
},
],
},
},
},
{
'type': 'rule',
'from': {
'data': 'globalVals',
},
'encode': {
'update': {
'strokeWidth': {
'value': 2,
},
'strokeDash': {
'value': [8, 8],
},
'x': {
'scale': 'x',
'field': 'minX',
},
'x2': {
'scale': 'x',
'field': 'maxX',
},
'y': {
'scale': 'y',
'field': 'cl3',
},
'strokeOpacity': [
{
'test': 'showGlobalControlLimits',
'value': 1.0,
},
{
'value': 0.0,
},
],
},
},
},
{
'type': 'group',
'from': {
'facet': {
'name': 'series',
'data': 'aggBy',
'groupby': 'groupByVal',
},
},
'marks': [
{
'type': 'line',
'from': {
'data': 'series',
},
'sort': {
'field': 'datum.%s' % state,
'order': 'ascending',
},
'encode': {
'update': {
'x': {
'scale': 'x',
'field': state,
},
'y': {
'scale': 'y',
'field': 'mean',
},
'stroke': {
'scale': 'color',
'field': 'groupByVal',
},
'strokeWidth': {
'signal': 'meanLineThickness',
},
'opacity': [
{
'test': group_test,
'signal': 'meanLineOpacity',
},
{
'value': 0.0,
},
],
},
},
},
# Need to add symbols into plot for mouseover
# https://github.com/vega/vega-tooltip/issues/120
{
'type': 'symbol',
'from': {
'data': 'series',
},
'encode': {
'update': {
'tooltip': {
'signal': mean_signal,
},
'x': {
'scale': 'x',
'field': state,
},
'y': {
'scale': 'y',
'field': 'mean',
},
'stroke': {
'scale': 'color',
'field': 'groupByVal',
},
'fill': {
'scale': 'color',
'field': 'groupByVal',
},
'size': {
'signal': 'meanSymbolSize',
},
'opacity': [
{
'test': group_test,
'signal': 'meanSymbolOpacity',
},
{
'value': 0.0,
},
],
},
},
},
{
'type': 'rect',
'from': {
'data': 'series',
},
'encode': {
'update': {
'width': {
'value': 2.0,
},
'x': {
'scale': 'x',
'field': state,
'band': 0.5,
},
'y': {
'scale': 'y',
'field': 'ci0',
},
'y2': {
'scale': 'y',
'field': 'ci1',
},
'fill': {
'scale': 'color',
'field': 'groupByVal',
},
'opacity': [
{
'test': error_bar_test,
'value': 1.0,
},
{
'value': 0.0,
},
],
},
},
},
],
},
]
signals = [
{
'name': 'grouper',
'value': default_group,
'bind': {
'input': 'select',
'element': '#group-column',
'options': group_columns,
}
},
{
'name': 'metric',
'value': default_metric,
'bind': {
'input': 'select',
'element': '#metric-column',
'options': metric_columns,
},
},
{
'name': 'width',
'value': '',
'bind': {
'input': 'text',
},
'on': [
{
'events': {
'source': 'window',
'type': 'resize',
},
'update': 'containerSize()[0]',
},
],
},
{
'name': 'showErrorBars',
'value': False,
'bind': {
'input': 'checkbox',
'element': '#toggle-error-bars',
},
},
{
'name': 'showGlobalMean',
'value': False,
'bind': {
'input': 'checkbox',
'element': '#toggle-global-mean',
},
},
{
'name': 'showGlobalControlLimits',
'value': False,
'bind': {
'input': 'checkbox',
'element': '#toggle-global-control-limits',
},
},
{
'name': 'meanLineThickness',
'value': 3,
'bind': {
'input': 'range',
'min': 0.1,
'max': 10,
'step': 0.1,
'element': '#mean-line-thickness',
},
},
{
'name': 'meanLineOpacity',
'value': 1.0,
'bind': {
'input': 'range',
'min': 0.0,
'max': 1.0,
'step': 0.01,
'element': '#mean-line-opacity',
},
},
{
'name': 'meanSymbolSize',
'value': 50.0,
'bind': {
'input': 'range',
'min': 0.0,
'max': 500.0,
'step': 1.0,
'element': '#mean-symbol-size',
},
},
{
'name': 'meanSymbolOpacity',
'value': 0.0,
'bind': {
'input': 'range',
'min': 0.0,
'max': 1.0,
'step': 0.01,
'element': '#mean-symbol-opacity',
},
},
{
'name': 'colorScheme',
'value': 'category10',
'bind': {
'input': 'select',
'element': '#color-scheme',
'options': [
'accent',
'category10',
'category20',
'category20b',
'category20c',
'dark2',
'paired',
'pastel1',
'pastel2',
'set1',
'set2',
'set3',
'tableau10',
'tableau20',
],
}
},
{
'name': 'clear',
'value': True,
'on': [
{
'events': 'mouseup[!event.item]',
'update': 'true',
'force': True,
},
],
},
{
'name': 'shift',
'value': False,
'on': [
{
'events': '@legendSymbol:click, @legendLabel:click',
'update': 'event.shiftKey',
'force': True,
},
],
},
{
'name': 'clicked',
'value': None,
'on': [
{
'events': '@legendSymbol:click, @legendLabel:click',
'update': '{value: datum.value}',
'force': True,
},
],
}]
if individual_id:
spaghetti_signal = ('{"title": "spaghetti", "individual_id": '
'datum["%s"], "group": datum.groupByVal, "state": '
'datum["%s"], "metric": datum.metricVal}' %
(individual_id, state))
marks.append({
'type': 'group',
'from': {
'facet': {
'name': 'spaghettis',
'data': 'individual',
'groupby': individual_id,
},
},
'marks': [
{
'type': 'line',
'from': {
'data': 'spaghettis',
},
'sort': {
'field': 'datum.%s' % state,
'order': 'ascending',
},
'encode': {
'update': {
'strokeWidth': {
'signal': 'spaghettiLineThickness',
},
'x': {
'scale': 'x',
'field': state,
},
'y': {
'scale': 'y',
'field': metric_signal,
},
'stroke': {
'scale': 'color',
'field': group_signal,
},
'opacity': [
{
'test': group_test,
'signal': 'spaghettiLineOpacity',
},
{
'value': 0.0,
},
],
},
},
},
# Need to add symbols into plot for mouseover
# https://github.com/vega/vega-tooltip/issues/120
{
'type': 'symbol',
'from': {
'data': 'spaghettis',
},
'encode': {
'update': {
'tooltip': {
'signal': spaghetti_signal,
},
'size': {
'signal': 'spaghettiSymbolSize',
},
'x': {
'scale': 'x',
'field': state,
},
'y': {
'scale': 'y',
'field': metric_signal,
},
'stroke': {
'scale': 'color',
'field': group_signal,
},
'fill': {
'scale': 'color',
'field': group_signal,
},
'opacity': [
{
'test': group_test,
'signal': 'spaghettiSymbolOpacity',
},
{
'value': 0.0,
},
],
},
},
},
],
})
signals.extend([
{
'name': 'spaghettiLineThickness',
'value': 0.5,
'bind': {
'input': 'range',
'min': 0.1,
'max': 10,
'step': 0.1,
'element': '#spaghetti-line-thickness',
},
},
{
'name': 'spaghettiLineOpacity',
'value': 0.5,
'bind': {
'input': 'range',
'min': 0.0,
'max': 1.0,
'step': 0.01,
'element': '#spaghetti-line-opacity',
},
},
{
'name': 'spaghettiSymbolSize',
'value': 50.0,
'bind': {
'input': 'range',
'min': 0.0,
'max': 500.0,
'step': 1.0,
'element': '#spaghetti-symbol-size',
},
},
{
'name': 'spaghettiSymbolOpacity',
'value': 0.0,
'bind': {
'input': 'range',
'min': 0.0,
'max': 1.0,
'step': 0.01,
'element': '#spaghetti-symbol-opacity',
},
}])
# Just a quick note, order doesn't matter here (JSON documents are not
# ordered) - this will render out stochastically, which is fine - vega
# knows what to do.
spec = {
# This `$schema` is only fetched as part of the interactive vega
# editor, which opens up outside of the visualization - this doesn't
# appear to create any kind of XHR side-effect when loading the
# visualization in an offline context.
'$schema': 'https://vega.github.io/schema/vega/v3.0.json',
'autosize': {
'type': 'fit-x',
'contains': 'padding',
'resize': True,
},
# These dimensions are here for when the viz is opened in the
# Vega Editor.
'width': 800,
'height': 400,
'signals': signals,
'scales': [
{
'name': 'x',
'type': 'linear',
'range': 'width',
'nice': True,
'domain': {
'data': 'individual',
'field': state,
'sort': True,
},
},
{
'name': 'y',
# Signal registration on this param is currently blocked by
# https://github.com/vega/vega/issues/525, which is why this
# setting is still a QIIME 2 param to this viz.
'type': yscale,
'range': 'height',
'nice': True,
'domain': {
'signal': domain_expr,
'sort': True,
},
},
{
'name': 'color',
'type': 'ordinal',
'range': {
'scheme': {
'signal': 'colorScheme',
},
},
'domain': {
'data': 'individual',
'field': 'groupByVal',
},
},
],
'axes': [
{
'orient': 'bottom',
'scale': 'x',
'title': state,
},
{
'orient': 'left',
'scale': 'y',
'title': metric_signal,
},
],
'legends': [
{
'stroke': 'color',
'title': group_signal,
'encode': {
'symbols': {
'name': 'legendSymbol',
'interactive': True,
'update': {
'fill': {
'value': 'transparent',
},
'strokeWidth': {
'value': 2,
},
'opacity': [
{
'test': opacity_test,
'value': 1.0,
},
{
'value': 0.15,
},
],
'size': {
'value': 100,
},
},
},
'labels': {
'name': 'legendLabel',
'interactive': True,
'update': {
'opacity': [
{
'test': opacity_test,
'value': 1,
},
{
'value': 0.25,
},
],
},
},
},
},
],
'marks': marks,
'data': [
{
'name': 'individual',
                'values': data.to_dict('records'),
'transform': [
{
'type': 'formula',
'as': 'groupByVal',
'expr': 'datum[grouper]',
},
{
'type': 'formula',
'as': 'metricVal',
'expr': 'datum[metric]',
},
],
},
{
'name': 'globalVals',
'source': 'individual',
'transform': [
{
'type': 'aggregate',
'ops': [
'mean',
'min',
'max',
'stdev',
'min',
'max',
],
'fields': [
metric_signal,
state,
state,
metric_signal,
metric_signal,
metric_signal,
],
'as': [
'mean',
'minX',
'maxX',
'stdev',
'minY',
'maxY',
]
},
{
'type': 'formula',
'as': 'cl0',
'expr': 'datum.mean - (3 * datum.stdev)'
},
{
'type': 'formula',
'as': 'cl1',
'expr': 'datum.mean - (2 * datum.stdev)'
},
{
'type': 'formula',
'as': 'cl2',
'expr': 'datum.mean + (2 * datum.stdev)'
},
{
'type': 'formula',
'as': 'cl3',
'expr': 'datum.mean + (3 * datum.stdev)'
},
{
'type': 'formula',
'as': 'ext',
'expr': '[datum.cl0, datum.cl3]',
},
],
},
{
'name': 'aggBy',
'source': 'individual',
'transform': [
{
'type': 'aggregate',
'groupby': [
'groupByVal',
state,
],
'ops': [
'mean',
# TODO: parameterize these intervals
# I don't see an easy way at the moment to define
# your own confidence interval in vega.
'ci0',
'ci1',
'count',
],
'fields': [
metric_signal,
metric_signal,
metric_signal,
metric_signal,
],
'as': [
'mean',
'ci0',
'ci1',
'count',
],
},
],
},
{
'name': 'selected',
'on': [
{
'trigger': 'clear',
'remove': True
},
{
'trigger': '!shift',
'remove': True
},
{
'trigger': '!shift && clicked',
'insert': 'clicked'
},
{
'trigger': 'shift && clicked',
'toggle': 'clicked'
},
],
},
],
}
return json.dumps(spec)
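# Usage sketch (illustrative only; the column names below are made up):
#   spec = _render_volatility_spec(
#       data=df, individual_id='subject', state='month',
#       default_group='group', group_columns=['group'],
#       default_metric='shannon', metric_columns=['shannon'],
#       yscale='linear')
#   open('spec.json', 'w').write(spec)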
| 33.699346 | 79 | 0.230185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,414 | 0.271981 |
a0ce075406a832ed84007060dd79bad299dae4e6 | 11696 | py | Python | state_workflow_sdk/api/state_workflow/state_workflow_client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | state_workflow_sdk/api/state_workflow/state_workflow_client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | null | null | null | state_workflow_sdk/api/state_workflow/state_workflow_client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import state_workflow_sdk.api.state_workflow.callback_pb2
import state_workflow_sdk.api.state_workflow.createStateWorkflow_pb2
import state_workflow_sdk.model.state_workflow.stateWorkflow_pb2
import state_workflow_sdk.api.state_workflow.deleteStateWorkflow_pb2
import google.protobuf.empty_pb2
import state_workflow_sdk.api.state_workflow.filterInstanceOfStateWorkflow_pb2
import state_workflow_sdk.api.state_workflow.searchStateWorkflow_pb2
import state_workflow_sdk.api.state_workflow.transitWorkflowStatus_pb2
import state_workflow_sdk.utils.http_util
import google.protobuf.json_format
class StateWorkflowClient(object):
def __init__(self, server_ip="", server_port=0, service_name="", host=""):
"""
        Initialize the client.
        :param server_ip: server IP the SDK should request directly; when empty, requests are routed via the naming service
        :param server_port: server port the SDK should request, used together with server_ip; when empty, requests are routed via the naming service
        :param service_name: service name the SDK should route by; when empty, routing falls back to the contract name. If both server_ip and service_name are set, server_ip takes precedence
        :param host: host name of the requested service, e.g. cmdb.easyops-only.com
"""
if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
raise Exception("server_ip和server_port必须同时指定")
self._server_ip = server_ip
self._server_port = server_port
self._service_name = service_name
self._host = host
def callback_state_workflow(self, request, org, user, timeout=10):
# type: (state_workflow_sdk.api.state_workflow.callback_pb2.CallbackStateWorkflowRequest, int, str, int) -> state_workflow_sdk.api.state_workflow.callback_pb2.CallbackStateWorkflowResponse
"""
        Execute a state-workflow callback.
        :param request: callback_state_workflow request
        :param org: customer org ID, an integer
        :param user: username used to call the API
        :param timeout: request timeout in seconds
:return: state_workflow_sdk.api.state_workflow.callback_pb2.CallbackStateWorkflowResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.state_workflow.state_workflow.CallbackStateWorkflow"
uri = "/api/v1/stateWorkflow/callback"
requestParam = request
rsp_obj = state_workflow_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.state_workflow_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = state_workflow_sdk.api.state_workflow.callback_pb2.CallbackStateWorkflowResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def create_state_workflow(self, request, org, user, timeout=10):
# type: (state_workflow_sdk.api.state_workflow.createStateWorkflow_pb2.CreateStateWorkflowRequest, int, str, int) -> state_workflow_sdk.model.state_workflow.stateWorkflow_pb2.StateWorkflow
"""
        Create a state workflow.
        :param request: create_state_workflow request
        :param org: customer org ID, an integer
        :param user: username used to call the API
        :param timeout: request timeout in seconds
:return: state_workflow_sdk.model.state_workflow.stateWorkflow_pb2.StateWorkflow
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.state_workflow.state_workflow.CreateStateWorkflow"
uri = "/api/v1/stateWorkflow"
requestParam = request
rsp_obj = state_workflow_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.state_workflow_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = state_workflow_sdk.model.state_workflow.stateWorkflow_pb2.StateWorkflow()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def delete_state_workflow(self, request, org, user, timeout=10):
# type: (state_workflow_sdk.api.state_workflow.deleteStateWorkflow_pb2.DeleteStateWorkflowRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
"""
        Delete a state workflow.
        :param request: delete_state_workflow request
        :param org: customer org ID, an integer
        :param user: username used to call the API
        :param timeout: request timeout in seconds
:return: google.protobuf.empty_pb2.Empty
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.state_workflow.state_workflow.DeleteStateWorkflow"
uri = "/api/v1/stateWorkflow/{instanceId}".format(
instanceId=request.instanceId,
)
requestParam = request
rsp_obj = state_workflow_sdk.utils.http_util.do_api_request(
method="DELETE",
src_name="logic.state_workflow_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = google.protobuf.empty_pb2.Empty()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
def filter_instance_of_state_workflow(self, request, org, user, timeout=10):
# type: (state_workflow_sdk.api.state_workflow.filterInstanceOfStateWorkflow_pb2.FilterInstanceOfStateWorkflowRequest, int, str, int) -> state_workflow_sdk.api.state_workflow.filterInstanceOfStateWorkflow_pb2.FilterInstanceOfStateWorkflowResponse
"""
        Filter instances using a state workflow's filter.
        :param request: filter_instance_of_state_workflow request
        :param org: customer org ID, an integer
        :param user: username used to call the API
        :param timeout: request timeout in seconds
:return: state_workflow_sdk.api.state_workflow.filterInstanceOfStateWorkflow_pb2.FilterInstanceOfStateWorkflowResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.state_workflow.state_workflow.FilterInstanceOfStateWorkflow"
uri = "/api/v1/stateWorkflow/instance/filter"
requestParam = request
rsp_obj = state_workflow_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.state_workflow_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = state_workflow_sdk.api.state_workflow.filterInstanceOfStateWorkflow_pb2.FilterInstanceOfStateWorkflowResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def search_state_workflow(self, request, org, user, timeout=10):
# type: (state_workflow_sdk.api.state_workflow.searchStateWorkflow_pb2.SearchStateWorkflowRequest, int, str, int) -> state_workflow_sdk.model.state_workflow.stateWorkflow_pb2.StateWorkflow
"""
        Search state workflows.
        :param request: search_state_workflow request
        :param org: customer org ID, an integer
        :param user: username used to call the API
        :param timeout: request timeout in seconds
:return: state_workflow_sdk.model.state_workflow.stateWorkflow_pb2.StateWorkflow
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.state_workflow.state_workflow.SearchStateWorkflow"
uri = "/api/v1/stateWorkflow/_search"
requestParam = request
rsp_obj = state_workflow_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.state_workflow_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = state_workflow_sdk.model.state_workflow.stateWorkflow_pb2.StateWorkflow()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def transit_state_workflow_status(self, request, org, user, timeout=10):
# type: (state_workflow_sdk.api.state_workflow.transitWorkflowStatus_pb2.TransitStateWorkflowStatusRequest, int, str, int) -> state_workflow_sdk.api.state_workflow.transitWorkflowStatus_pb2.TransitStateWorkflowStatusResponse
"""
        Transition the workflow status of an instance attribute.
        :param request: transit_state_workflow_status request
        :param org: customer org ID, an integer
        :param user: username used to call the API
        :param timeout: request timeout in seconds
:return: state_workflow_sdk.api.state_workflow.transitWorkflowStatus_pb2.TransitStateWorkflowStatusResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.state_workflow.state_workflow.TransitStateWorkflowStatus"
uri = "/api/v1/stateWorkflow/transit"
requestParam = request
rsp_obj = state_workflow_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.state_workflow_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = state_workflow_sdk.api.state_workflow.transitWorkflowStatus_pb2.TransitStateWorkflowStatusResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
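# Usage sketch (illustrative only; the org, user, and instance id values are made up):
#   client = StateWorkflowClient(host="cmdb.easyops-only.com")
#   req = state_workflow_sdk.api.state_workflow.deleteStateWorkflow_pb2.DeleteStateWorkflowRequest()
#   req.instanceId = "5c9a2b1e8f"
#   client.delete_state_workflow(req, org=8888, user="easyops", timeout=10)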
| 41.183099 | 254 | 0.658516 | 11,655 | 0.946945 | 0 | 0 | 0 | 0 | 0 | 0 | 4,782 | 0.388528 |
a0ceec8ec85ef44ddb9d9cd56199a36790b171fc | 4171 | py | Python | tests/contour_classifiers/test_randomforest.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | ["MIT"] | 21 | 2016-08-22T22:00:49.000Z | 2020-03-29T04:15:19.000Z | tests/contour_classifiers/test_randomforest.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | ["MIT"] | 22 | 2016-08-28T01:07:08.000Z | 2018-02-07T14:38:26.000Z | tests/contour_classifiers/test_randomforest.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | ["MIT"] | 3 | 2017-01-12T10:04:27.000Z | 2022-01-06T13:25:48.000Z |
"""Test for motif.contour_classifiers.random_forest
"""
from __future__ import print_function
import unittest
import numpy as np
from motif.contour_classifiers import random_forest
def array_equal(array1, array2):
return np.all(np.isclose(array1, array2))
class TestRandomForest(unittest.TestCase):
def setUp(self):
self.clf = random_forest.RandomForest(
n_estimators=2, n_iter_search=1, random_state=6
)
def test_n_estimators(self):
expected = 2
actual = self.clf.n_estimators
self.assertEqual(expected, actual)
def test_n_jobs(self):
expected = -1
actual = self.clf.n_jobs
self.assertEqual(expected, actual)
def test_class_weight(self):
expected = 'balanced'
actual = self.clf.class_weight
self.assertEqual(expected, actual)
def test_n_iter_search(self):
expected = 1
actual = self.clf.n_iter_search
self.assertEqual(expected, actual)
def test_clf(self):
expected = None
actual = self.clf.clf
self.assertEqual(expected, actual)
def test_predict_error(self):
with self.assertRaises(ReferenceError):
self.clf.predict(np.array([0, 0, 0]))
def test_fit(self):
X = np.array([
[1.0, 2.0], [0.0, 0.0], [0.5, 0.7],
[0.0, 0.0], [1.0, 2.5], [-1.0, 2.1],
[1.2, 1.2], [1.0, 1.0], [4.0, 0.0],
[-1.0, -1.0]
])
Y = np.array([0, 1, 0, 1, 0, 0, 1, 1, 0, 1])
self.clf.fit(X, Y)
self.assertIsNotNone(self.clf.clf)
def test_predict(self):
X = np.array([
[1.0, 2.0], [0.0, 0.0], [0.5, 0.7],
[0.0, 0.0], [1.0, 2.5], [-1.0, 2.1],
[1.2, 1.2], [1.0, 1.0], [4.0, 0.0],
[-1.0, -1.0]
])
Y = np.array([0, 1, 0, 1, 0, 0, 1, 1, 0, 1])
self.clf.fit(X, Y)
actual = self.clf.predict(
np.array([[1.0, 2.0], [1.0, 3.0], [-2.0, -2.0]])
)
expected = np.array([0.0, 0.0, 1.0])
self.assertTrue(array_equal(actual, expected))
def test_predict_discrete_label(self):
X = np.array([
[1.0, 2.0], [0.0, 0.0], [0.5, 0.7],
[0.0, 0.0], [1.0, 2.5], [-1.0, 2.1],
[1.2, 1.2], [1.0, 1.0], [4.0, 0.0],
[-1.0, -1.0]
])
Y = np.array([0, 1, 0, 1, 0, 0, 1, 1, 0, 1])
self.clf.fit(X, Y)
actual = self.clf.predict_discrete_label(
np.array([[1.0, 2.0], [1.0, 3.0], [-2.0, -2.0]])
)
expected = np.array([0, 0, 1])
self.assertTrue(array_equal(actual, expected))
def test_threshold(self):
expected = 0.5
actual = self.clf.threshold
self.assertEqual(expected, actual)
def test_get_id(self):
expected = 'random_forest'
actual = self.clf.get_id()
self.assertEqual(expected, actual)
def test_score(self):
predicted_scores = np.array([0.0, 0.25, 1.0, 0.5, 0.9])
y_pred = np.array([0, 0, 1, 1, 1])
y_target = np.array([0, 0, 1, 1, 1])
expected = {
'accuracy': 1.0,
'mcc': 1.0,
'precision': np.array([1.0, 1.0]),
'recall': np.array([1.0, 1.0]),
'f1': np.array([1.0, 1.0]),
'support': np.array([2, 3]),
'confusion matrix': np.array([[2, 0], [0, 3]]),
'auc score': 1.0
}
actual = self.clf.score(y_pred, y_target, y_prob=predicted_scores)
self.assertEqual(expected['accuracy'], actual['accuracy'])
self.assertAlmostEqual(expected['mcc'], actual['mcc'], places=1)
self.assertTrue(
array_equal(expected['precision'], actual['precision'])
)
self.assertTrue(array_equal(expected['recall'], actual['recall']))
self.assertTrue(array_equal(expected['f1'], actual['f1']))
self.assertTrue(array_equal(expected['support'], actual['support']))
self.assertTrue(array_equal(
expected['confusion matrix'], actual['confusion matrix']
))
self.assertEqual(expected['auc score'], actual['auc score'])
| 32.585938 | 76 | 0.529369 | 3,919 | 0.939583 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.070487 |
a0cf8257e1729da63a070f7fb21ed2b3279418e3 | 7,365 | py | Python | awsenv/profile.py | KensoDev/awsenv | 4bf759106d2e0d79221d0ca9188ed7686e119b2c | [
"Apache-2.0"
]
| 6 | 2016-09-11T08:39:50.000Z | 2018-10-22T13:41:34.000Z | awsenv/profile.py | KensoDev/awsenv | 4bf759106d2e0d79221d0ca9188ed7686e119b2c | [
"Apache-2.0"
]
| 1 | 2017-01-09T23:58:20.000Z | 2017-01-09T23:58:20.000Z | awsenv/profile.py | KensoDev/awsenv | 4bf759106d2e0d79221d0ca9188ed7686e119b2c | [
"Apache-2.0"
]
| 5 | 2017-01-09T23:26:12.000Z | 2021-09-08T09:35:59.000Z | """
Profile-aware session wrapper.
"""
from os import environ
from botocore.exceptions import ProfileNotFound
from botocore.session import Session
from awsenv.cache import CachedSession
def get_default_profile_name():
"""
Get the default profile name from the environment.
"""
return environ.get("AWS_DEFAULT_PROFILE", "default")
class AWSSession(object):
"""
AWS session wrapper.
"""
def __init__(self, profile=None):
self.profile = profile
self.session = Session(profile=self.profile)
@property
def access_key_id(self):
return None
@property
def secret_access_key(self):
return None
@property
def region_name(self):
return environ.get("AWS_REGION", environ.get("AWS_DEFAULT_REGION", "us-west-2"))
@property
def session_token(self):
return None
def create_client(self,
service_name,
api_version=None,
use_ssl=True,
verify=None,
endpoint_url=None,
config=None):
"""
Create a service from the wrapped session.
Automatically populates the region name, access key, secret key, and session token.
Allows other parameters to be passed.
"""
return self.session.create_client(
service_name=service_name,
region_name=self.region_name,
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
aws_session_token=self.session_token,
api_version=api_version,
use_ssl=use_ssl,
verify=verify,
endpoint_url=endpoint_url,
config=config,
)
class AWSProfile(AWSSession):
"""
AWS profile configuration.
"""
def __init__(self,
profile,
session_duration,
cached_session,
account_id=None):
"""
Configure a session for a profile.
:param profile: the name of the profile to use, if any
:param session_duration: the duration of the session (in seconds)
must be in the range 900-3600
:param cached_session: the cached session to use, if any
:param account_id: the account id for profile auto-generation (if any)
"""
self.session_duration = session_duration
self.cached_session = cached_session
self.account_id = account_id
super(AWSProfile, self).__init__(profile)
@property
def access_key_id(self):
return self.merged_config.get("aws_access_key_id")
@property
def secret_access_key(self):
return self.merged_config.get("aws_secret_access_key")
@property
def region_name(self):
return self.merged_config.get("region")
@property
def role_arn(self):
return self.profile_config.get("role_arn")
@property
def session_token(self):
return self.cached_session.token if self.cached_session else None
@property
def session_name(self):
return self.cached_session.name if self.cached_session else None
@property
def profile_config(self):
"""
Return the loaded configuration for the profile.
"""
try:
return self.session.get_scoped_config()
except ProfileNotFound:
if self.account_id is None:
raise
# attempt to generate the profile configuration
self.session._profile_map[self.profile] = dict(
role_arn="arn:aws:iam::{}:role/{}".format(
self.account_id,
self.profile,
),
source_profile=get_default_profile_name(),
)
return self.session.get_scoped_config()
@property
def source_profile_config(self):
"""
Return the loaded configuration for the source profile, if any.
"""
source_profile_name = self.profile_config.get("source_profile")
all_profiles = self.session.full_config["profiles"]
return all_profiles.get(source_profile_name, {})
@property
def merged_config(self):
"""
Merged the profile and source configurations along with the current credentials.
"""
result = self.source_profile_config.copy()
result.update(self.profile_config)
if self.session._credentials:
result.update(
aws_access_key_id=self.session._credentials.access_key,
aws_secret_access_key=self.session._credentials.secret_key,
aws_session_token=self.session._credentials.token,
)
# Override with AWS_REGION environment variable
region_from_envvar = environ.get("AWS_REGION")
if region_from_envvar:
result.update(region=region_from_envvar)
return result
def to_envvars(self):
return {
"AWS_ACCESS_KEY_ID": self.access_key_id,
"AWS_DEFAULT_REGION": self.region_name,
"AWS_PROFILE": self.profile,
"AWS_SECRET_ACCESS_KEY": self.secret_access_key,
"AWS_SESSION_NAME": self.session_name,
"AWS_SESSION_TOKEN": self.session_token,
}
def update_credentials(self):
"""
Update the profile's credentials by assuming a role, if necessary.
"""
if not self.role_arn:
return
if self.cached_session is not None:
# use current role
access_key, secret_key = self.current_role()
else:
# assume role to get a new token
access_key, secret_key = self.assume_role()
if access_key and secret_key:
self.session.set_credentials(
access_key=access_key,
secret_key=secret_key,
token=self.cached_session.token if self.cached_session else None,
)
def current_role(self):
"""
Load credentials for the current role.
"""
return (
environ.get("AWS_ACCESS_KEY_ID", self.access_key_id),
environ.get("AWS_SECRET_ACCESS_KEY", self.secret_access_key),
)
def assume_role(self):
"""
Assume a role.
"""
# we need to pass in the regions and keys because botocore does not
# automatically merge configuration from the source_profile
sts_client = self.session.create_client(
service_name="sts",
region_name=self.region_name,
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
)
session_name = CachedSession.make_name()
result = sts_client.assume_role(**{
"RoleArn": self.role_arn,
"RoleSessionName": session_name,
"DurationSeconds": self.session_duration,
})
# update the cached session
self.cached_session = CachedSession(
name=session_name,
token=result["Credentials"]["SessionToken"],
profile=self.profile,
)
return (
result["Credentials"]["AccessKeyId"],
result["Credentials"]["SecretAccessKey"],
)
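
# Rough usage sketch (assumes a profile named "my-profile" exists in the local AWS config):
#
#     profile = AWSProfile("my-profile", session_duration=3600, cached_session=None)
#     profile.update_credentials()   # assumes the configured role, if any
#     env = profile.to_envvars()     # AWS_* variables ready to export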
| 31.075949 | 91 | 0.60611 | 7,009 | 0.951663 | 0 | 0 | 2,657 | 0.36076 | 0 | 0 | 2,003 | 0.271962 |
a0d0d288568d1ad31c787944a756b68fdcfc394c | 13,358 | py | Python | cail/algo/twoiwil.py | Stanford-ILIAD/Confidence-Aware-Imitation-Learning | 1d8af0e4ab87a025885133a2384d5a937329b2f5 | [
"MIT"
]
| 16 | 2021-10-30T15:19:37.000Z | 2022-03-23T12:57:49.000Z | cail/algo/twoiwil.py | syzhang092218-source/Confidence-Aware-Imitation-Learning | 1d8af0e4ab87a025885133a2384d5a937329b2f5 | [
"MIT"
]
| null | null | null | cail/algo/twoiwil.py | syzhang092218-source/Confidence-Aware-Imitation-Learning | 1d8af0e4ab87a025885133a2384d5a937329b2f5 | [
"MIT"
]
| 2 | 2021-11-29T11:28:16.000Z | 2022-03-06T14:12:47.000Z | import torch
import os
import torch.nn.functional as F
import numpy as np
import copy
from torch import nn
from torch.optim import Adam
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from typing import Tuple
from .ppo import PPO, PPOExpert
from .utils import CULoss
from cail.network import AIRLDiscrim, Classifier
from cail.buffer import SerializedBuffer
class TwoIWIL(PPO):
"""
Implementation of 2IWIL, using PPO-based AIRL as the backbone IL algorithm
Reference:
----------
    [1] Wu, Y.-H., Charoenphakdee, N., Bao, H., Tangkaratt, V., and Sugiyama, M.
Imitation learning from imperfect demonstration.
    In International Conference on Machine Learning, pp. 6818–6827, 2019.
Parameters
----------
buffer_exp: SerializedBuffer
buffer of demonstrations
state_shape: np.array
shape of the state space
action_shape: np.array
shape of the action space
device: torch.device
cpu or cuda
seed: int
random seed
gamma: float
discount factor
rollout_length: int
rollout length of the buffer
mix_buffer: int
times for rollout buffer to mix
batch_size: int
batch size for sampling from current policy and demonstrations
lr_actor: float
learning rate of the actor
lr_critic: float
learning rate of the critic
lr_disc: float
learning rate of the discriminator
units_actor: tuple
hidden units of the actor
units_critic: tuple
hidden units of the critic
units_disc_r: tuple
hidden units of the discriminator r
units_disc_v: tuple
hidden units of the discriminator v
epoch_ppo: int
at each update period, update ppo for these times
epoch_disc: int
at each update period, update the discriminator for these times
clip_eps: float
clip coefficient in PPO's objective
lambd: float
lambd factor
coef_ent: float
entropy coefficient
max_grad_norm: float
maximum gradient norm
classifier_iter: int
iteration of training the classifier
lr_classifier: float
learning rate of the classifier
"""
def __init__(
self,
buffer_exp: SerializedBuffer,
state_shape: np.array,
action_shape: np.array,
device: torch.device,
seed: int,
gamma: float = 0.995,
rollout_length: int = 10000,
mix_buffer: int = 1,
batch_size: int = 64,
lr_actor: float = 3e-4,
lr_critic: float = 3e-4,
lr_disc: float = 3e-4,
units_actor: tuple = (64, 64),
units_critic: tuple = (64, 64),
units_disc_r: tuple = (100, 100),
units_disc_v: tuple = (100, 100),
epoch_ppo: int = 50,
epoch_disc: int = 10,
clip_eps: float = 0.2,
lambd: float = 0.97,
coef_ent: float = 0.0,
max_grad_norm: float = 10.0,
classifier_iter: int = 25000,
lr_classifier: float = 3e-4
):
super().__init__(
state_shape, action_shape, device, seed, gamma, rollout_length,
mix_buffer, lr_actor, lr_critic, units_actor, units_critic,
epoch_ppo, clip_eps, lambd, coef_ent, max_grad_norm
)
# expert's buffer
self.buffer_exp = buffer_exp
# discriminator
self.disc = AIRLDiscrim(
state_shape=state_shape,
gamma=gamma,
hidden_units_r=units_disc_r,
hidden_units_v=units_disc_v,
hidden_activation_r=nn.ReLU(inplace=True),
hidden_activation_v=nn.ReLU(inplace=True)
).to(device)
self.learning_steps_disc = 0
self.optim_disc = Adam(self.disc.parameters(), lr=lr_disc)
self.batch_size = batch_size
self.epoch_disc = epoch_disc
# classifier
self.classifier = Classifier(state_shape, action_shape).to(device)
self.n_label_traj = self.buffer_exp.n_traj
self.classifier_iter = classifier_iter
self.optim_classifier = Adam(self.classifier.parameters(), lr=lr_classifier)
self.train_classifier()
self.save_classifier = False
# label conf
states_exp, action_exp, _, _, _ = self.buffer_exp.get()
self.conf = torch.sigmoid(self.classifier(torch.cat((states_exp, action_exp), dim=-1)))
def train_classifier(self):
"""Train a classifier"""
print('Training classifier')
label_traj_states = copy.deepcopy(self.buffer_exp.traj_states)
label_traj_actions = copy.deepcopy(self.buffer_exp.traj_actions)
label_traj_rewards = copy.deepcopy(self.buffer_exp.traj_rewards)
# use ranking to label confidence
conf_gap = 1.0 / float(self.n_label_traj - 1)
ranking = np.argsort(label_traj_rewards)
traj_lengths = np.asarray([i.shape[0] for i in label_traj_states])
n_label_demos = traj_lengths.sum()
label = np.zeros(n_label_demos)
ptr = 0
for i in range(traj_lengths.shape[0]):
label[ptr: ptr + traj_lengths[i]] = ranking[i] * conf_gap
ptr += traj_lengths[i]
label = torch.from_numpy(label).to(self.device)
label_traj = torch.cat((torch.cat(label_traj_states), torch.cat(label_traj_actions)), dim=-1)
batch = min(128, label_traj.shape[0])
ubatch = int(batch / label_traj.shape[0] * self.buffer_exp.buffer_size)
loss_fun = CULoss(label, beta=1 - self.buffer_exp.label_ratio, device=self.device, non=True)
# start training
for i_iter in tqdm(range(self.classifier_iter)):
idx = np.random.choice(label_traj.shape[0], batch)
labeled = self.classifier(Variable(label_traj[idx, :]))
smp_conf = label[idx]
states_exp, actions_exp, _, _, _ = self.buffer_exp.sample(ubatch)
unlabeled = self.classifier(torch.cat((states_exp, actions_exp), dim=-1))
self.optim_classifier.zero_grad()
risk = loss_fun(smp_conf, labeled, unlabeled)
risk.backward()
self.optim_classifier.step()
if i_iter % 2000 == 0:
tqdm.write(f'iteration: {i_iter}\tcu loss: {risk.data.item():.3f}')
self.classifier = self.classifier.eval()
print("Classifier finished training")
def sample_exp(
self,
batch_size: int
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Sample from expert's demonstrations
Parameters
----------
batch_size: int
number of samples
Returns
-------
states: torch.Tensor
expert's states
actions: torch.Tensor
expert's actions
dones: torch.Tensor
expert's dones
next_states: torch.Tensor
expert's next states
conf: torch.Tensor
confidence of expert's demonstrations
"""
# Samples from expert's demonstrations.
all_states_exp, all_actions_exp, _, all_dones_exp, all_next_states_exp = \
self.buffer_exp.get()
all_conf = Variable(self.conf)
all_conf_mean = Variable(all_conf.mean())
conf = all_conf / all_conf_mean
with torch.no_grad():
self.conf = conf
idxes = np.random.randint(low=0, high=all_states_exp.shape[0], size=batch_size)
return (
all_states_exp[idxes],
all_actions_exp[idxes],
all_dones_exp[idxes],
all_next_states_exp[idxes],
self.conf[idxes]
)
def update(self, writer: SummaryWriter):
"""
Update the algorithm
Parameters
----------
writer: SummaryWriter
writer for logs
"""
self.learning_steps += 1
for _ in range(self.epoch_disc):
self.learning_steps_disc += 1
# samples from current policy's trajectories
states, _, _, dones, log_pis, next_states = self.buffer.sample(self.batch_size)
# samples from expert's demonstrations
states_exp, actions_exp, dones_exp, next_states_exp, conf = self.sample_exp(self.batch_size)
# calculate log probabilities of expert actions
with torch.no_grad():
log_pis_exp = self.actor.evaluate_log_pi(states_exp, actions_exp)
# update discriminator
self.update_disc(
states, dones, log_pis, next_states, states_exp,
dones_exp, log_pis_exp, next_states_exp, conf, writer
)
# we don't use reward signals here
states, actions, _, dones, log_pis, next_states = self.buffer.get()
# calculate rewards
rewards = self.disc.calculate_reward(
states, dones, log_pis, next_states)
# update PPO using estimated rewards
self.update_ppo(
states, actions, rewards, dones, log_pis, next_states, writer)
def update_disc(
self,
states: torch.Tensor,
dones: torch.Tensor,
log_pis: torch.Tensor,
next_states: torch.Tensor,
states_exp: torch.Tensor,
dones_exp: torch.Tensor,
log_pis_exp: torch.Tensor,
next_states_exp: torch.Tensor,
conf: torch.Tensor,
writer: SummaryWriter
):
"""
Update the discriminator
Parameters
----------
states: torch.Tensor
states sampled from current IL policy
dones: torch.Tensor
dones sampled from current IL policy
log_pis: torch.Tensor
log(\pi(s|a)) sampled from current IL policy
next_states: torch.Tensor
next states sampled from current IL policy
states_exp: torch.Tensor
states sampled from demonstrations
dones_exp: torch.Tensor
dones sampled from demonstrations
log_pis_exp: torch.Tensor
log(\pi(s|a)) sampled from demonstrations
next_states_exp: torch.Tensor
next states sampled from demonstrations
conf: torch.Tensor
learned confidence of the demonstration samples
writer: SummaryWriter
writer for logs
"""
# output of discriminator is (-inf, inf), not [0, 1]
logits_pi = self.disc(states, dones, log_pis, next_states)
logits_exp = self.disc(states_exp, dones_exp, log_pis_exp, next_states_exp)
# discriminator is to maximize E_{\pi} [log(1 - D)] + E_{exp} [log(D)]
loss_pi = -F.logsigmoid(-logits_pi).mean()
loss_exp = -(F.logsigmoid(logits_exp).mul(conf)).mean()
loss_disc = loss_pi + loss_exp
self.optim_disc.zero_grad()
loss_disc.backward()
self.optim_disc.step()
if self.learning_steps_disc % self.epoch_disc == 0:
writer.add_scalar(
'loss/disc', loss_disc.item(), self.learning_steps)
# discriminator's accuracies
with torch.no_grad():
acc_pi = (logits_pi < 0).float().mean().item()
acc_exp = (logits_exp > 0).float().mean().item()
writer.add_scalar('stats/acc_pi', acc_pi, self.learning_steps)
writer.add_scalar('stats/acc_exp', acc_exp, self.learning_steps)
def save_models(self, save_dir: str):
"""
Save the model
Parameters
----------
save_dir: str
path to save
"""
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
torch.save(self.disc.state_dict(), f'{save_dir}/disc.pkl')
torch.save(self.actor.state_dict(), f'{save_dir}/actor.pkl')
if not self.save_classifier:
torch.save(self.classifier.state_dict(), f'{save_dir}/../classifier.pkl')
self.save_classifier = True
class TwoIWILExpert(PPOExpert):
"""
Well-trained 2IWIL agent
Parameters
----------
state_shape: np.array
shape of the state space
action_shape: np.array
shape of the action space
device: torch.device
cpu or cuda
path: str
path to the well-trained weights
units_actor: tuple
hidden units of the actor
"""
def __init__(
self,
state_shape: np.array,
action_shape: np.array,
device: torch.device,
path: str,
units_actor: tuple = (64, 64)
):
super(TwoIWILExpert, self).__init__(
state_shape=state_shape,
action_shape=action_shape,
device=device,
path=path,
units_actor=units_actor
)
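
# Rough usage sketch (assumes an expert SerializedBuffer, rollouts collected by the PPO
# backbone, and a torch SummaryWriter named `writer`):
#
#     agent = TwoIWIL(buffer_exp, state_shape, action_shape, device, seed=0)
#     agent.update(writer)  # one discriminator + PPO update using confidence-weighted demos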
| 34.786458 | 105 | 0.586166 | 12,914 | 0.966617 | 0 | 0 | 0 | 0 | 0 | 0 | 4,756 | 0.355988 |
a0d0f0826bf05af84c68e2d12e3788dc07ebfcd6 | 7,327 | py | Python | data/generation_scripts/MantaFlow/scripts3D/compactifyData.py | tum-pbs/VOLSIM | 795a31c813bf072eb88289126d7abd9fba8b0e54 | [
"MIT"
]
| 7 | 2022-01-28T09:40:15.000Z | 2022-03-07T01:52:00.000Z | data/generation_scripts/MantaFlow/scripts3D/compactifyData.py | tum-pbs/VOLSIM | 795a31c813bf072eb88289126d7abd9fba8b0e54 | [
"MIT"
]
| null | null | null | data/generation_scripts/MantaFlow/scripts3D/compactifyData.py | tum-pbs/VOLSIM | 795a31c813bf072eb88289126d7abd9fba8b0e54 | [
"MIT"
]
| 1 | 2022-03-14T22:08:47.000Z | 2022-03-14T22:08:47.000Z | import numpy as np
import os, shutil
import imageio
baseDir = "data/train_verbose"
outDir = "data/train"
#baseDir = "data/test_verbose"
#outDir = "data/test"
outDirVidCopy = "data/videos"
combineVidsAll = {"smoke" : ["densMean", "densSlice", "velMean", "velSlice", "presMean", "presSlice"],
"liquid": ["flagsMean", "flagsSlice", "velMean", "velSlice", "phiMean", "phiSlice"] }
convertData = True
processVid = True
copyVidOnly = False
ignoreTop = ["shapes", "waves"]
ignoreSim = []
ignoreFrameDict = {}
excludeIgnoreFrame = False
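
# Layout assumption behind the merging below: every field/frame is stored as 11 ".npz" part
# files ("part00" ... "part10") and every rendered quantity as 11 video parts; the script
# stitches the arrays back into one (11, res, res, res, channels) tensor and tiles the
# per-part videos into a single clip.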
topDirs = os.listdir(baseDir)
topDirs.sort()
#shutil.rmtree(outDir)
#os.makedirs(outDir)
# top level folders
for topDir in topDirs:
mantaMsg("\n" + topDir)
if ignoreTop and any( item in topDir for item in ignoreTop ) :
mantaMsg("Ignored")
continue
simDir = os.path.join(baseDir, topDir)
sims = os.listdir(simDir)
sims.sort()
# sim_000000 folders
for sim in sims:
if ignoreSim and any( item in sim for item in ignoreSim ) :
mantaMsg(sim + " - Ignored")
continue
currentDir = os.path.join(simDir, sim)
files = os.listdir(currentDir)
files.sort()
destDir = os.path.join(outDir, topDir, sim)
#if os.path.isdir(destDir):
# shutil.rmtree(destDir)
if not os.path.isdir(destDir):
os.makedirs(destDir)
# single files
for file in files:
filePath = os.path.join(currentDir, file)
# copy src folder to destination
if os.path.isdir(filePath) and file == "src":
dest = os.path.join(destDir, "src")
if not os.path.isdir(dest):
shutil.copytree(filePath, dest, symlinks=False)
# combine video files
elif os.path.isdir(filePath) and file == "render":
if not processVid:
continue
dest = os.path.join(destDir, "render")
if copyVidOnly:
shutil.copytree(filePath, dest, symlinks=False)
continue
if not os.path.isdir(dest):
os.makedirs(dest)
#mantaMsg(file)
renderDir = os.path.join(currentDir, "render")
vidFiles = os.listdir(renderDir)
if "smoke" in topDir: combineVids = combineVidsAll["smoke"]
elif "liquid" in topDir: combineVids = combineVidsAll["liquid"]
else: combineVids = [""]
for vidFile in vidFiles:
if combineVids[0] + "00.mp4" not in vidFile:
continue
vidLine = []
for combineVid in combineVids:
# find all video part files corresponding to current one
vidParts = []
i = 0
while os.path.exists(os.path.join(renderDir, vidFile.replace(combineVids[0]+"00.mp4", combineVid+"%02d.mp4" % i))):
vidParts.append(vidFile.replace(combineVids[0]+"00.mp4", combineVid+"%02d.mp4" % i))
i += 1
assert len(vidParts) == 11
# combine each video part file
loadedVids = []
for part in vidParts:
currentFile = os.path.join(renderDir, part)
loaded = imageio.mimread(currentFile)
#mantaMsg(len(loaded))
#mantaMsg(loaded[0].shape)
loadedVids.append(loaded)
#temp1 = np.concatenate(loadedVids[0:4], axis=2)
#temp2 = np.concatenate(loadedVids[4:8], axis=2)
#temp3 = np.concatenate(loadedVids[8:11]+[np.zeros_like(loadedVids[0])], axis=2)
#vidLine.append(np.concatenate([temp1, temp2, temp3], axis=1))
vidLine.append(np.concatenate(loadedVids, axis=2))
combined = np.concatenate(vidLine, axis=1)
# save combined file
if combineVids[0] == "": newName = os.path.join(dest, "%s_%s_%s.mp4" % (topDir, sim, vidFile.replace("00.mp4", ".mp4")))
else: newName = os.path.join(dest, "%s_%s.mp4" % (topDir, sim))
imageio.mimwrite(newName, combined, quality=6, fps=11, ffmpeg_log_level="error")
# save copy
if combineVids[0] == "": newNameCopy = os.path.join(outDirVidCopy, "%s_%s_%s.mp4" % (topDir, sim, vidFile.replace("00.mp4", ".mp4")))
else: newNameCopy = os.path.join(outDirVidCopy, "%s_%s.mp4" % (topDir, sim))
imageio.mimwrite(newNameCopy, combined, quality=6, fps=11, ffmpeg_log_level="error")
# copy description files to destination
elif os.path.splitext(filePath)[1] == ".json" or os.path.splitext(filePath)[1] == ".py" or os.path.splitext(filePath)[1] == ".log":
shutil.copy(filePath, destDir)
# ignore other dirs and non .npz files
elif os.path.isdir(filePath) or os.path.splitext(filePath)[1] != ".npz" or "part00" not in file:
continue
# combine part files
else:
if not convertData:
continue
if ignoreFrameDict:
filterFrames = []
for key, value in ignoreFrameDict.items():
if key in topDir:
filterFrames = value
break
assert (filterFrames != []), "Keys in filterFrameDict don't match dataDir structure!"
# continue for frames when excluding or including according to filter
if excludeIgnoreFrame == any( item in file for item in filterFrames ):
continue
# find all part files corresponding to current one
parts = [file]
i = 1
while os.path.exists(os.path.join(currentDir, file.replace("part00", "part%02d" % i))):
parts.append(file.replace("part00", "part%02d" % i))
i += 1
assert len(parts) == 11
# combine each part file
domain = np.load(os.path.join(currentDir, parts[0]))['arr_0']
res = domain.shape[0]
combined = np.zeros([len(parts), res, res, res, domain.shape[3]])
for f in range(len(parts)):
currentFile = os.path.join(currentDir, parts[f])
loaded = np.load(currentFile)['arr_0']
combined[f] = loaded
# save combined file
newName = file.replace("_part00", "")
np.savez_compressed( os.path.join(destDir, newName), combined )
loaded = np.load( os.path.join(destDir, newName) )['arr_0']
mantaMsg(os.path.join(sim, newName) + "\t" + str(loaded.shape))
| 43.613095 | 153 | 0.512079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,519 | 0.207315 |
a0d159678318f4de46108d8e3c19f4a355d8744f | 14,238 | py | Python | qiskit/aqua/operators/base_operator.py | Sahar2/qiskit-aqua | a228fbe6b9613cff43e47796a7e4843deba2b051 | [
"Apache-2.0"
]
| null | null | null | qiskit/aqua/operators/base_operator.py | Sahar2/qiskit-aqua | a228fbe6b9613cff43e47796a7e4843deba2b051 | [
"Apache-2.0"
]
| null | null | null | qiskit/aqua/operators/base_operator.py | Sahar2/qiskit-aqua | a228fbe6b9613cff43e47796a7e4843deba2b051 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from abc import ABC, abstractmethod
import warnings
from qiskit import QuantumCircuit
class BaseOperator(ABC):
"""Operators relevant for quantum applications."""
@abstractmethod
def __init__(self, basis=None, z2_symmetries=None, name=None):
"""Constructor."""
self._basis = basis
self._z2_symmetries = z2_symmetries
self._name = name if name is not None else ''
@property
def name(self):
return self._name
@name.setter
def name(self, new_value):
self._name = new_value
@property
def basis(self):
return self._basis
@property
def z2_symmetries(self):
return self._z2_symmetries
@abstractmethod
def __add__(self, other):
"""Overload + operation."""
raise NotImplementedError
@abstractmethod
def __iadd__(self, other):
"""Overload += operation."""
raise NotImplementedError
@abstractmethod
def __sub__(self, other):
"""Overload - operation."""
raise NotImplementedError
@abstractmethod
def __isub__(self, other):
"""Overload -= operation."""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""Overload unary - ."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""Overload == operation."""
raise NotImplementedError
@abstractmethod
def __str__(self):
"""Overload str()."""
raise NotImplementedError
@abstractmethod
def __mul__(self, other):
"""Overload *."""
raise NotImplementedError
@abstractmethod
def construct_evaluation_circuit(self, wave_function):
"""Build circuits to compute the expectation w.r.t the wavefunction."""
raise NotImplementedError
@abstractmethod
def evaluate_with_result(self, result):
"""
Consume the result from the quantum computer to build the expectation,
will be only used along with the `construct_evaluation_circuit` method.
"""
raise NotImplementedError
@abstractmethod
def evolve(self):
"""
Time evolution, exp^(-jt H).
"""
raise NotImplementedError
@abstractmethod
def print_details(self):
raise NotImplementedError
@abstractmethod
def _scaling_weight(self, scaling_factor):
# TODO: will be removed after the deprecated method is removed.
raise NotImplementedError
@abstractmethod
def chop(self, threshold, copy=False):
raise NotImplementedError
def print_operators(self, mode='paulis'):
warnings.warn("print_operators() is deprecated and it will be removed after 0.6, "
"Use `print_details()` instead",
DeprecationWarning)
return self.print_details()
@property
def coloring(self):
warnings.warn("coloring is removed, "
"Use the `TPBGroupedWeightedPauliOperator` class to group a paulis directly",
DeprecationWarning)
return None
def _to_dia_matrix(self, mode=None):
warnings.warn("_to_dia_matrix method is removed, use the `MatrixOperator` class to get diagonal matrix. And "
"the current deprecated method does NOT modify the original object, it returns the dia_matrix",
DeprecationWarning)
from .op_converter import to_matrix_operator
mat_op = to_matrix_operator(self)
return mat_op.dia_matrix
def enable_summarize_circuits(self):
warnings.warn("enable_summarize_circuits method is removed. Enable the summary at QuantumInstance",
DeprecationWarning)
def disable_summarize_circuits(self):
warnings.warn("disable_summarize_circuits method is removed. Disable the summary at QuantumInstance",
DeprecationWarning)
@property
def representations(self):
warnings.warn("representations method is removed. each operator is self-defined, ",
DeprecationWarning)
return None
def eval(self, operator_mode, input_circuit, backend, backend_config=None, compile_config=None,
run_config=None, qjob_config=None, noise_config=None):
warnings.warn("eval method is removed. please use `construct_evaluate_circuit` and submit circuit by yourself "
"then, use the result along with `evaluate_with_result` to get mean and std. "
"Furthermore, if you compute the expectation against a statevector (numpy array), you can "
"use evaluate_with_statevector directly.",
DeprecationWarning)
return None, None
def convert(self, input_format, output_format, force=False):
warnings.warn("convert method is removed. please use the conversion functions in the "
"qiskit.aqua.operators.op_converter module. There are different `to_xxx_operator` functions"
" And the current deprecated method does NOT modify the original object, it returns.",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator, to_matrix_operator, to_tpb_grouped_weighted_pauli_operator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
if output_format == 'paulis':
return to_weighted_pauli_operator(self)
elif output_format == 'grouped_paulis':
return to_tpb_grouped_weighted_pauli_operator(self, TPBGroupedWeightedPauliOperator.sorted_grouping)
elif output_format == 'matrix':
return to_matrix_operator(self)
def two_qubit_reduced_operator(self, m, threshold=10 ** -13):
warnings.warn("two_qubit_reduced_operator method is deprecated and it will be removed after 0.6. "
"Now it is moved to the `Z2Symmetries` class as a classmethod. """
"Z2Symmeteries.two_qubit_reduction(num_particles)",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
from .weighted_pauli_operator import Z2Symmetries
return Z2Symmetries.two_qubit_reduction(to_weighted_pauli_operator(self), m)
@staticmethod
def qubit_tapering(operator, cliffords, sq_list, tapering_values):
warnings.warn("qubit_tapering method is deprecated and it will be removed after 0.6. "
"Now it is moved to the `Z2Symmetries` class.",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
from .weighted_pauli_operator import Z2Symmetries
sq_paulis = [x.paulis[1][1] for x in cliffords]
symmetries = [x.paulis[0][1] for x in cliffords]
tmp_op = to_weighted_pauli_operator(operator)
z2_symmetries = Z2Symmetries(symmetries, sq_paulis, sq_list, tapering_values)
return z2_symmetries.taper(tmp_op)
def scaling_coeff(self, scaling_factor):
warnings.warn("scaling_coeff method is deprecated and it will be removed after 0.6. "
"Use `* operator` with the scalar directly.",
DeprecationWarning)
self._scaling_weight(scaling_factor)
return self
def zeros_coeff_elimination(self):
warnings.warn("zeros_coeff_elimination method is deprecated and it will be removed after 0.6. "
"Use chop(0.0) to remove terms with 0 weight.",
DeprecationWarning)
self.chop(0.0)
return self
@staticmethod
def construct_evolution_circuit(slice_pauli_list, evo_time, num_time_slices, state_registers,
ancillary_registers=None, ctl_idx=0, unitary_power=None, use_basis_gates=True,
shallow_slicing=False):
from .common import evolution_instruction
warnings.warn("The `construct_evolution_circuit` method is deprecated, use the `evolution_instruction` in "
"the qiskit.aqua.operators.common module instead.",
DeprecationWarning)
if state_registers is None:
raise ValueError('Quantum state registers are required.')
qc_slice = QuantumCircuit(state_registers)
if ancillary_registers is not None:
qc_slice.add_register(ancillary_registers)
controlled = ancillary_registers is not None
inst = evolution_instruction(slice_pauli_list, evo_time, num_time_slices, controlled, 2 ** ctl_idx,
use_basis_gates, shallow_slicing)
qc_slice.append(inst, [q for qreg in qc_slice.qregs for q in qreg])
qc_slice = qc_slice.decompose()
return qc_slice
@staticmethod
def row_echelon_F2(matrix_in):
from .common import row_echelon_F2
warnings.warn("The `row_echelon_F2` method is deprecated, use the row_echelon_F2 function in "
"the qiskit.aqua.operators.common module instead.",
DeprecationWarning)
return row_echelon_F2(matrix_in)
@staticmethod
def kernel_F2(matrix_in):
from .common import kernel_F2
warnings.warn("The `kernel_F2` method is deprecated, use the kernel_F2 function in "
"the qiskit.aqua.operators.common module instead.",
DeprecationWarning)
return kernel_F2(matrix_in)
def find_Z2_symmetries(self):
warnings.warn("The `find_Z2_symmetries` method is deprecated and it will be removed after 0.6, "
"Use the class method in the `Z2Symmetries` class instead",
DeprecationWarning)
from .weighted_pauli_operator import Z2Symmetries
from .op_converter import to_weighted_pauli_operator
wp_op = to_weighted_pauli_operator(self)
self._z2_symmetries = Z2Symmetries.find_Z2_symmetries(wp_op)
return self._z2_symmetries.symmetries, self._z2_symmetries.sq_paulis, \
self._z2_symmetries.cliffords, self._z2_symmetries.sq_list
def to_grouped_paulis(self):
warnings.warn("to_grouped_paulis method is deprecated and it will be removed after 0.6. And the current "
"deprecated method does NOT modify the original object, it returns the grouped weighted pauli "
"operator. Please check the qiskit.aqua.operators.op_convertor for converting to different "
"types of operators. For grouping paulis, you can create your own grouping func to create the "
"class you need.",
DeprecationWarning)
from .op_converter import to_tpb_grouped_weighted_pauli_operator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
return to_tpb_grouped_weighted_pauli_operator(self, grouping_func=TPBGroupedWeightedPauliOperator.sorted_grouping)
def to_paulis(self):
warnings.warn("to_paulis method is deprecated and it will be removed after 0.6. And the current deprecated "
"method does NOT modify the original object, it returns the weighted pauli operator."
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
return to_weighted_pauli_operator(self)
def to_matrix(self):
warnings.warn("to_matrix method is deprecated and it will be removed after 0.6. And the current deprecated "
"method does NOT modify the original object, it returns the matrix operator."
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_matrix_operator
return to_matrix_operator(self)
def to_weighted_pauli_operator(self):
warnings.warn("to_weighted_apuli_operator method is temporary helper method and it will be removed after 0.6. "
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_weighted_pauli_operator
return to_weighted_pauli_operator(self)
def to_matrix_operator(self):
warnings.warn("to_matrix_operator method is temporary helper method and it will be removed after 0.6. "
"Please check the qiskit.aqua.operators.op_convertor for converting to different types of "
"operators",
DeprecationWarning)
from .op_converter import to_matrix_operator
return to_matrix_operator(self)
def to_tpb_grouped_weighted_pauli_operator(self):
warnings.warn("to_tpb_grouped_weighted_pauli_operator method is temporary helper method and it will be "
"removed after 0.6. Please check the qiskit.aqua.operators.op_convertor for converting to "
"different types of operators",
DeprecationWarning)
from .op_converter import to_tpb_grouped_weighted_pauli_operator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
return to_tpb_grouped_weighted_pauli_operator(
self, grouping_func=TPBGroupedWeightedPauliOperator.sorted_grouping)
| 44.633229 | 122 | 0.666877 | 13,645 | 0.958351 | 0 | 0 | 5,336 | 0.374772 | 0 | 0 | 5,017 | 0.352367 |
a0d37d7e9574c755f53a5c193de3f30cb81ee61a | 4,447 | py | Python | DataAnalysis/utils.py | Timlo512/AnomalyStockDetection | 29f9aaef14f1d9823980d8022cdce1f7f6310813 | [
"MIT"
]
| 2 | 2020-12-19T05:24:29.000Z | 2021-05-15T19:35:40.000Z | DataAnalysis/utils.py | Timlo512/AnomalyStockDetection | 29f9aaef14f1d9823980d8022cdce1f7f6310813 | [
"MIT"
]
| null | null | null | DataAnalysis/utils.py | Timlo512/AnomalyStockDetection | 29f9aaef14f1d9823980d8022cdce1f7f6310813 | [
"MIT"
]
| 5 | 2020-11-21T02:25:13.000Z | 2022-01-31T12:46:02.000Z | import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
import re
def convert_data_sparse_matrix(df, row_label = 'stock_code', col_label = 'name_of_ccass_participant', value_label = 'shareholding'):
"""
    Pivot df into a dense (row_label x col_label) matrix whose entries are the summed
    value_label amounts; also returns the row/column label-to-index dictionaries.
"""
try:
# Prepare zero matrix
row_dim = len(df[row_label].unique())
col_dim = len(df[col_label].unique())
sparse_matrix = np.zeros((row_dim, col_dim))
# Prepare label to index dictionaries
row_ind_dict = {label: ind for ind, label in enumerate(sorted(df[row_label].unique().tolist()))}
col_ind_dict = {label: ind for ind, label in enumerate(sorted(df[col_label].unique().tolist()))}
# Transform row_label column and col_label column to index
df['row_ind'] = df[row_label].apply(lambda x: row_ind_dict[x])
df['col_ind'] = df[col_label].apply(lambda x: col_ind_dict[x])
for ind, row in df.iterrows():
# Get index and shareholding
row_ind = row['row_ind']
col_ind = row['col_ind']
value = row[value_label]
# Assign to sparse matrix
sparse_matrix[row_ind, col_ind] += value
return sparse_matrix, row_ind_dict, col_ind_dict
except Exception as e:
print(e)
return None
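
# Example call shape (with the default CCASS column names used above):
#     matrix, stock_index, participant_index = convert_data_sparse_matrix(df)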
def load_data(data_path):
# Read csv files
df = pd.read_csv(data_path)
# Convert stock code to formatted string
df['stock_code'] = df['stock_code'].apply(lambda x: ('00000' + str(x))[-5:])
return df
def f_score(y_truth, y_pred, beta = 1):
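    # F-beta combines precision (P) and recall (R): F_beta = (1 + beta**2) * P * R / (beta**2 * P + R);
    # beta > 1 weights recall more heavily, beta < 1 favours precision.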
try:
# Run confusion_matrix
tn, fp, fn, tp = confusion_matrix(y_truth, y_pred).ravel()
precision_value = precision(tp, fp)
recall_value = recall(tp, fn)
# print recall
print('True positive: {}, True Negative: {}, False Positive: {}, False Negative: {}'.format(tp, tn, fp, fn))
print('Precision is ', format(precision_value * 100, '.2f'), '%')
print('Recall is ', format(recall_value * 100, '.2f'), '%')
return (1 + beta**2) * (precision_value * recall_value) / ((beta**2 * precision_value + recall_value))
except Exception as e:
print(e)
return None
def precision(tp, fp):
return tp / (tp + fp)
def recall(tp, fn):
return tp / (tp + fn)
def get_truth_label(path, threshold = 0.3):
# Load dataset
df = pd.read_csv(path)
# preprocess the data in order to get a proper data structure
df = df.set_index('Unnamed: 0').transpose().dropna()
df = df.reset_index()
df['index'] = df['index'].apply(lambda x: retrieve_stock_code(x))
df = df.set_index('index')
# Define col_dim and empty dataframe
col_dim = len(df.columns)
temp = pd.DataFrame()
# Create a list of column name without the first element
first_dim = df.columns[0]
col_list = df.columns.to_list()
col_list.remove(first_dim)
for col in col_list:
# Assign the col to second_dim, as current date
second_dim = col
# Calculate the daily % change of stock price
temp[col] = (df[second_dim] - df[first_dim]) / df[first_dim]
# Assign the col to first dim, as previous date
first_dim = col
result = np.sum(temp > threshold, axis = 1)
return {stock_code:1 if count > 0 else 0 for stock_code, count in result.items()}
def retrieve_stock_code(x):
d = re.search('[0-9]*', x)
if d:
return ('00000' + d.group(0))[-5:]
else:
return None
def cluster_predict(label, min_pts = 'auto'):
"""
Input: an array of clsutered label for each instance
return: an array of anomal label for each instance
"""
try:
# Get Unqiue label and its counts
(unique, counts) = np.unique(label, return_counts = True)
# Define minimum points that it should have in a cluster, if auto, it will take the min count
if min_pts == 'auto':
min_pts = min(counts)
print('Minimum points of a cluster among the clusters: ', min_pts)
else:
min_pts = int(min_pts)
# Prepare label_dict for mapping
label_dict = {label: 0 if count > min_pts else 1 for label, count in zip(unique, counts)}
# Map label_dict to label
return np.array([label_dict[i] for i in label])
except Exception as e:
print(e)
return None
| 32.698529 | 132 | 0.614796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,263 | 0.284012 |
a0d5155e320c1b2b6704a06d42d9b58088cb485b | 1,429 | py | Python | scripts/prepare_upload_files.py | MaayanLab/scAVI | 7f3f83657d749520243535581db1080075e48aa5 | [
"Apache-2.0"
]
| 3 | 2020-01-23T08:48:33.000Z | 2021-07-21T02:42:28.000Z | scripts/prepare_upload_files.py | MaayanLab/scAVI | 7f3f83657d749520243535581db1080075e48aa5 | [
"Apache-2.0"
]
| 21 | 2019-10-25T15:38:37.000Z | 2022-01-27T16:04:04.000Z | scripts/prepare_upload_files.py | MaayanLab/scAVI | 7f3f83657d749520243535581db1080075e48aa5 | [
"Apache-2.0"
]
| 1 | 2019-10-24T18:15:26.000Z | 2019-10-24T18:15:26.000Z | '''
Prepare some files to test the upload functionality.
'''
import sys
sys.path.append('../')
from database import *
from pymongo import MongoClient
mongo = MongoClient(MONGOURI)
db = mongo['SCV']
coll = db['dataset']
from gene_expression import *
expr_df, meta_doc = load_read_counts_and_meta(organism='mouse', gse='GSE96870')
# rename the samples
expr_df.columns = ['sample_%d' % i for i in range(len(expr_df.columns))]
meta_df = pd.DataFrame(meta_doc['meta_df'])
meta_df.index = expr_df.columns
meta_df.index.name = 'sample_ID'
# parse the meta_df a bit
meta_df['Sample_characteristics_ch1'] = meta_df['Sample_characteristics_ch1'].map(lambda x:x.split('\t'))
keys_from_char_ch1 = [item.split(': ')[0] for item in meta_df['Sample_characteristics_ch1'][0]]
for i, key in enumerate(keys_from_char_ch1):
meta_df[key] = meta_df['Sample_characteristics_ch1'].map(lambda x:x[i].split(': ')[1])
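# Each GEO characteristics entry is a tab-separated list of "key: value" pairs, so the loop
# above expands them into one metadata column per key.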
# drop unnecessary columns in meta_df
meta_df = meta_df.drop(['Sample_characteristics_ch1',
'Sample_relation', 'Sample_geo_accession', 'Sample_supplementary_file_1'],
axis=1)
# fake a column of continuous values
meta_df['random_continuous_attr'] = np.random.randn(meta_df.shape[0])
meta_df.to_csv('../data/sample_metadata.csv')
# raw read counts
expr_df.to_csv('../data/sample_read_counts_%dx%d.csv' % expr_df.shape)
# CPMs
expr_df = compute_CPMs(expr_df)
expr_df.to_csv('../data/sample_CPMs_%dx%d.csv' % expr_df.shape)
| 30.404255 | 105 | 0.751575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 611 | 0.427572 |
a0d646ba03a4465fe2514a5e2b0f73386fb45c4c | 2,321 | py | Python | app/api/V1/views/products.py | Paulvitalis200/Store-Manager-API | d61e91bff7fc242da2a93d1caf1012465c7c904a | [
"MIT"
]
| null | null | null | app/api/V1/views/products.py | Paulvitalis200/Store-Manager-API | d61e91bff7fc242da2a93d1caf1012465c7c904a | [
"MIT"
]
| 4 | 2018-10-21T18:28:03.000Z | 2018-10-24T12:48:24.000Z | app/api/V1/views/products.py | Paulstar200/Store-Manager-API | d61e91bff7fc242da2a93d1caf1012465c7c904a | [
"MIT"
]
| null | null | null | from flask import Flask, request
from flask_restful import Resource, reqparse
from flask_jwt_extended import create_access_token, jwt_required
from app.api.V1.models import Product, products
class PostProduct(Resource):
parser = reqparse.RequestParser()
parser.add_argument('name', required=True, help='Product name cannot be blank', type=str)
parser.add_argument('price', required=True, help=' Product price cannot be blank or a word', type=int)
parser.add_argument('quantity', required=True, help='Product quantity cannot be blank or a word', type=int)
@jwt_required
def post(self):
# input validation
data = request.get_json()
args = PostProduct.parser.parse_args()
name = args.get('name').strip() # removes whitespace
price = args.get('price')
quantity = args.get('quantity')
payload = ['name', 'price', 'quantity']
if not name or not price or not quantity:
return {'message': 'Product name, price and quantity are all required'}, 400
else:
# Check if the item is not required
for item in data.keys():
if item not in payload:
return {"message": "The field '{}' is not required for the products".format(item)}, 400
try:
product = Product.create_product(name, price, quantity)
return {
'message': 'Product created successfully!',
'product': product,
'status': 'ok'
}, 201
except Exception as my_exception:
print(my_exception)
return {'message': 'Something went wrong.'}, 500
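
# Illustrative JSON body for PostProduct.post (hypothetical values; the route itself is
# registered elsewhere): {"name": "Sugar", "price": 50, "quantity": 10}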
class GetAllProducts(Resource):
# Both attendant and store owner can get products
@jwt_required
def get(self):
products = Product.get_products()
if len(products) == 0:
return {'message': "No products created yet."}
return {
'message': 'Products successfully retrieved!',
'products': products
}, 200
# Get a single specific product
class GetEachProduct(Resource):
@jwt_required
def get(self, product_id):
try:
return products[product_id - 1]
except IndexError:
return {"message": "No item with that ID in stock"}
| 35.166667 | 111 | 0.616545 | 2,088 | 0.899612 | 0 | 0 | 1,577 | 0.679449 | 0 | 0 | 677 | 0.291685 |
a0d68497a4530b9b9bb8366ff9da7d608dd9a751 | 1,155 | py | Python | 51-100/p87.py | YiWeiShen/Project-Euler-Hints | a79cacab075dd98d393516f083aaa7ffc6115a06 | [
"MIT"
]
| 1 | 2019-02-25T13:00:31.000Z | 2019-02-25T13:00:31.000Z | 51-100/p87.py | YiWeiShen/Project-Euler-Hints | a79cacab075dd98d393516f083aaa7ffc6115a06 | [
"MIT"
]
| null | null | null | 51-100/p87.py | YiWeiShen/Project-Euler-Hints | a79cacab075dd98d393516f083aaa7ffc6115a06 | [
"MIT"
]
| null | null | null | import time
from multiprocessing.pool import Pool
def is_prime(num):
for i in range(2, int(num**0.5+1)):
if num % i == 0:
return None
return num
if __name__ == '__main__':
t = time.time()
p1 = Pool(processes=30)
p2 = Pool(processes=30)
p3 = Pool(processes=30)
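    # Upper bounds follow from the 50,000,000 cap: 7071**2, 368**3 and 84**4 are the largest
    # square, cube and fourth power of a candidate prime that stay below the limit.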
num1 = range(2, 7072)
num2 = range(2, 369)
num3 = range(2, 85)
prime_list1 = p1.map(is_prime, num1)
p1.close()
p1.join()
prime_list2 = p2.map(is_prime, num2)
p2.close()
p2.join()
prime_list3 = p3.map(is_prime, num3)
p3.close()
p3.join()
prime_list1_clear = [x for x in prime_list1 if x is not None]
prime_list2_clear = [x for x in prime_list2 if x is not None]
prime_list3_clear = [x for x in prime_list3 if x is not None]
result_list = []
for i in prime_list1_clear:
print(i)
for j in prime_list2_clear:
for k in prime_list3_clear:
test_num = i**2 + j**3 + k**4
if test_num < 50000000:
result_list.append(test_num)
print(str(len(list(set(result_list)))))
print('time:'+str(time.time()-t))
| 26.860465 | 65 | 0.587013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.014719 |
a0d6b47a07ed18120ebb9b10352d658a22a11ecb | 267 | py | Python | Clean Word/index.py | Sudani-Coder/python | 9c35f04a0521789ba91b7058695139ed074f7796 | [
"MIT"
]
| null | null | null | Clean Word/index.py | Sudani-Coder/python | 9c35f04a0521789ba91b7058695139ed074f7796 | [
"MIT"
]
| null | null | null | Clean Word/index.py | Sudani-Coder/python | 9c35f04a0521789ba91b7058695139ed074f7796 | [
"MIT"
]
| null | null | null | # recursion function (Clean Word)
def CleanWord(word):
if len(word) == 1:
return word
elif word[0] == word[1]:
return CleanWord(word[1:])
else:
return word[0] + CleanWord(word[1:])
print(CleanWord("wwwooooorrrrllddd"))
| 19.071429 | 44 | 0.58427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.194757 |
a0d7aa3f87b3b51ae56654591cba7faff73f9f8f | 665 | py | Python | commands/rotatecamera.py | 1757WestwoodRobotics/mentorbot | 3db344f3b35c820ada4e1aef3eca9b1fc4c5b85a | [
"MIT"
]
| 2 | 2021-11-13T20:18:44.000Z | 2021-11-13T20:27:04.000Z | commands/rotatecamera.py | 1757WestwoodRobotics/mentorbot | 3db344f3b35c820ada4e1aef3eca9b1fc4c5b85a | [
"MIT"
]
| null | null | null | commands/rotatecamera.py | 1757WestwoodRobotics/mentorbot | 3db344f3b35c820ada4e1aef3eca9b1fc4c5b85a | [
"MIT"
]
| 1 | 2021-11-14T01:38:53.000Z | 2021-11-14T01:38:53.000Z | import typing
from commands2 import CommandBase
from subsystems.cameracontroller import CameraSubsystem
class RotateCamera(CommandBase):
def __init__(self, camera: CameraSubsystem,
leftRight: typing.Callable[[], float],
upDown: typing.Callable[[], float]) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.camera = camera
self.leftRight = leftRight
self.upDown = upDown
self.addRequirements([self.camera])
self.setName(__class__.__name__)
def execute(self) -> None:
self.camera.setCameraRotation(self.leftRight(), self.upDown())
| 28.913043 | 70 | 0.667669 | 557 | 0.837594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a0d85ead79155e87bca877ab2df552ddd4292930 | 8,188 | py | Python | instapp/views.py | uwamahororachel/instagram | d5b7127e62047287dfadec15743676df48f278a9 | [
"MIT"
]
| null | null | null | instapp/views.py | uwamahororachel/instagram | d5b7127e62047287dfadec15743676df48f278a9 | [
"MIT"
]
| null | null | null | instapp/views.py | uwamahororachel/instagram | d5b7127e62047287dfadec15743676df48f278a9 | [
"MIT"
]
| null | null | null | from django.shortcuts import render,redirect
from django.http import HttpResponse, Http404,HttpResponseRedirect
import datetime as dt
from .models import Post,Comment,Follow,Profile
from django.contrib.auth.decorators import login_required
from .forms import NewPostForm, NewCommentForm, AddProfileForm
from django.contrib.auth.models import User
def signup(request):
if request.user.is_authenticated():
return redirect('insta')
else:
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.save()
                new_profile = Profile(user=user)
                new_profile.save()
else:
form = SignupForm()
return render(request, 'registration/registration_form.html',{'form':form})
@login_required(login_url='/accounts/login/')
def insta(request):
title='Instapp'
users = User.objects.all()
current_user = request.user
profile = Profile.objects.filter(user=current_user).first()
if profile == None:
my_profile = None
else:
my_profile=profile
comments = Comment.objects.all().order_by('-date_posted')
posts = Post.objects.all().order_by('-date_posted')
    if request.method == 'POST' and 'post' in request.POST:
        posted = request.POST.get("post")
        for post in posts:
            if int(post.id) == int(posted):
                post.like += 1
                post.save()
        return redirect('insta')
return render(request, 'index.html', {"posts": posts, 'comments':comments,'users':users,'user':current_user,'my_profile':my_profile,'title':title})
@login_required(login_url='/accounts/login/')
def new_post(request):
current_user = request.user
profile = Profile.get_profile(current_user)
if profile == None:
return redirect('add_profile')
else:
if request.method == 'POST':
form = NewPostForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.user = current_user
post.profile = profile
post.save()
return redirect('insta')
else:
form = NewPostForm()
return render(request, 'newPost.html', {"form": form})
@login_required(login_url='/accounts/login/')
def single_post(request, post_id):
post = Post.objects.get(pk=post_id)
comments = Comment.get_comments_by_post(post_id).order_by('-date_posted')
current_user = request.user
if request.method == 'POST':
form = NewCommentForm(request.POST)
if form.is_valid():
new_comment = form.save(commit=False)
new_comment.user = current_user
new_comment.post = post
new_comment.save()
return redirect('single_post',post_id=post_id)
    if request.method == 'POST' and 'post' in request.POST:
        posted = request.POST.get("post")
        if int(post.id) == int(posted):
            post.like += 1
            post.save()
        return redirect('single_post', post_id=post_id)
else:
form = NewCommentForm()
return render(request, 'post.html', {'post':post, 'form':form,'comments':comments})
@login_required(login_url='/accounts/login/')
def my_profile(request):
current_user = request.user
profile = Profile.objects.get(user=current_user)
count = Post.objects.filter(profile=profile).count
comments = Comment.objects.all().order_by('-date_posted')
posts = None
if profile == None:
return redirect('add_profile')
else:
posts = Post.get_posts_by_id(profile.id).order_by('-date_posted')
        if request.method == 'POST' and 'post' in request.POST:
            posted = request.POST.get("post")
            for post in posts:
                if int(post.id) == int(posted):
                    post.like += 1
                    post.save()
            return redirect('my_profile')
return render(request, 'profile.html', {"posts": posts, "profile": profile, 'count':count,'comments':comments})
@login_required(login_url='/accounts/login/')
def update_post(request,post_id):
    post = Post.objects.get(pk=post_id)
    if request.method == 'POST':
        form = NewPostForm(request.POST)
        if form.is_valid():
            caption = form.cleaned_data['caption']
            post.caption = caption
            post = post.update_post(post_id, caption)
            return redirect('my_profile')
else:
form = NewPostForm()
return render(request, 'postUpdate.html',{'form':form,'post':post})
def delete_post(request,post_id):
post= Post.objects.get(pk=post_id)
post.delete_post()
    return redirect('my_profile')
@login_required(login_url='/accounts/login/')
def add_profile(request):
current_user = request.user
if request.method == 'POST':
form = AddProfileForm(request.POST, request.FILES)
if form.is_valid():
new_profile = form.save(commit=False)
new_profile.user = current_user
new_profile.save()
return redirect('my_profile')
else:
form = AddProfileForm()
return render(request, 'addProfile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def update_profile(request):
current_user = request.user
if request.method == 'POST':
form = AddProfileForm(request.POST, request.FILES)
if form.is_valid():
new_profile = form.save(commit=False)
new_profile.user = current_user
new_profile.save()
return redirect('my_profile')
else:
form = AddProfileForm()
return render(request, 'addProfile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def search_results(request):
if 'user' in request.GET and request.GET["user"]:
search_term = request.GET.get("user")
profiles = Profile.find_profile(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"results": profiles, "message":message})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def profile(request, profile_id):
profile = Profile.get_profile_id(profile_id)
posts = Post.objects.filter(profile=profile.id).order_by('-date_posted')
count = Post.objects.filter(profile=profile).count
comments = Comment.objects.all().order_by('-date_posted')
for post in posts:
if request.method=='POST' and 'post' in request.POST:
posted=request.POST.get("post")
for post in posts:
if (int(post.id)==int(posted)):
post.like+=1
post.save()
return redirect('profile', profile_id=profile_id)
return render(request, 'userProfile.html', {"posts": posts, "profile": profile, 'count':count,'comments':comments})
@login_required(login_url='/accounts/login/')
def follow(request, profile_id):
current_user = request.user
profile = Profile.get_profile_id(profile_id)
follow_user = Follow(user=current_user, profile=profile)
follow_user.save()
myprofile_id= str(profile.id)
return redirect('insta')
| 36.882883 | 151 | 0.626282 | 0 | 0 | 0 | 0 | 7,171 | 0.875794 | 0 | 0 | 1,083 | 0.132267 |
a0d898d83393f9e2a6f4299d21f948ceddccd556 | 238 | py | Python | 2008/wxpytris/wxpytris.py | mikiec84/code-for-blog | 79b2264f9a808eb14f624cb3c5ae7624038c043a | [
"Unlicense"
]
| 1,199 | 2015-01-06T14:09:37.000Z | 2022-03-29T19:39:51.000Z | 2008/wxpytris/wxpytris.py | mikiec84/code-for-blog | 79b2264f9a808eb14f624cb3c5ae7624038c043a | [
"Unlicense"
]
| 25 | 2016-07-29T15:44:01.000Z | 2021-11-19T16:21:01.000Z | 2008/wxpytris/wxpytris.py | mikiec84/code-for-blog | 79b2264f9a808eb14f624cb3c5ae7624038c043a | [
"Unlicense"
]
| 912 | 2015-01-04T00:39:50.000Z | 2022-03-29T06:50:22.000Z | import sys
import wx
sys.path.insert(0, 'lib.zip')
from lib.TetrisGame import TetrisGame
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = TetrisGame(None)
frame.Show(True)
app.MainLoop()
| 11.9 | 38 | 0.617647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.079832 |
a0d89d58810bc392058c43540e5719fda8ed9934 | 6,822 | py | Python | cfg.py | alexandonian/relational-set-abstraction | 8af6a6a58883ce59c7b29e4161ff970e3bded642 | [
"MIT"
]
| 9 | 2020-09-17T23:09:42.000Z | 2021-12-29T09:56:24.000Z | cfg.py | alexandonian/relational-set-abstraction | 8af6a6a58883ce59c7b29e4161ff970e3bded642 | [
"MIT"
]
| null | null | null | cfg.py | alexandonian/relational-set-abstraction | 8af6a6a58883ce59c7b29e4161ff970e3bded642 | [
"MIT"
]
| 1 | 2021-01-16T07:19:42.000Z | 2021-01-16T07:19:42.000Z | import argparse
import torch
import logger
import models
import utils
NUM_NODES = {
'moments': 391,
'multimoments': 391,
'kinetics': 608,
}
CRITERIONS = {
'CE': {'func': torch.nn.CrossEntropyLoss},
'MSE': {'func': torch.nn.MSELoss},
'BCE': {'func': torch.nn.BCEWithLogitsLoss},
}
OPTIMIZERS = {
'SGD': {
'func': torch.optim.SGD,
'lr': 0.001,
'momentum': 0.9,
'weight_decay': 5e-4,
},
'Adam': {'func': torch.optim.Adam, 'weight_decay': 5e-4},
}
SCHEDULER_DEFAULTS = {'CosineAnnealingLR': {'T_max': 100}}
METAFILE_FILE = {
'moments': {
'train': 'metadata/moments_train_abstraction_sets.json',
'val': 'metadata/moments_val_abstraction_sets.json',
},
'kinetics': {
'train': 'metadata/kinetics_train_abstraction_sets.json',
'val': 'metadata/kinetics_val_abstraction_sets.json',
},
}
FEATURES_FILE = {
'moments': {
'train': 'metadata/resnet3d50_moments_train_features.pth',
'val': 'metadata/resnet3d50_moments_val_features.pth',
'test': 'metadata/resnet3d50_moments_test_features.pth',
},
'kinetics': {
'train': 'metadata/resnet3d50_kinetics_train_features.pth',
'val': 'metadata/resnet3d50_kinetics_val_features.pth',
'test': 'metadata/resnet3d50_kinetics_test_features.pth',
},
}
EMBEDDING_FILE = {
'moments': {
'train': 'metadata/moments_train_embeddings.pth',
'val': 'metadata/moments_val_embeddings.pth',
},
'kinetics': {
'train': 'metadata/kinetics_train_embeddings.pth',
'val': 'metadata/kinetics_val_embeddings.pth',
'test': 'metadata/kinetics_test_embeddings.pth',
},
}
EMBEDDING_CATEGORIES_FILE = {
'moments': 'metadata/moments_category_embeddings.pth',
'kinetics': 'metadata/kinetics_category_embeddings.pth',
}
LIST_FILE = {
'moments': {
'train': 'metadata/moments_train_listfile.txt',
'val': 'metadata/moments_val_listfile.txt',
'test': 'metadata/moments_test_listfile.txt',
},
'kinetics': {
'train': 'metadata/kinetics_train_listfile.txt',
'val': 'metadata/kinetics_val_listfile.txt',
'test': 'metadata/kinetics_test_listfile.txt',
},
}
RANKING_FILE = {
'moments': 'metadata/moments_human_abstraction_sets.json',
'kinetics': 'metadata/kinetics_human_abstraction_sets.json',
}
GRAPH_FILE = {
'moments': 'metadata/moments_graph.json',
'kinetics': 'metadata/kinetics_graph.json',
}
def parse_args():
parser = argparse.ArgumentParser(description="Abstraction Experiments")
parser.add_argument(
'-e',
'--experiment',
type=str,
default='AbstractionEmbedding',
help="name of experiment to run",
)
parser.add_argument(
'-i',
'--exp_id',
type=str,
help="unique name or id of particular experimental run",
)
parser.add_argument(
'-d',
'--dataset',
type=str,
default='moments',
choices=['moments', 'kinetics'],
help='name of dataset',
)
parser.add_argument(
'-m',
'--model_name',
type=str,
default='AbstractionEmbeddingModule',
help='class name of model to instantiate',
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
default=256,
help='number of elements (sets) in batch',
)
parser.add_argument('--optimizer', type=str, default='Adam')
parser.add_argument('--criterion', nargs='+', default=['MSE', 'CE'])
parser.add_argument('-l', '--loss_weights', nargs='+', default=[1, 1], type=float)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('-s', '--scales', nargs='+', default=[1, 2, 3, 4], type=int)
parser.add_argument('-r', '--resume', type=str, default=None)
parser.add_argument('--log_dir', type=str, default='logs')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints')
parser.add_argument('--output_dir', type=str, default='outputs')
parser.add_argument('--metadata_dir', type=str, default='metadata')
parser.add_argument('--logger_name', type=str, default='AbstractionLogger')
parser.add_argument('--num_epochs', type=int, default=60)
parser.add_argument('--start_epoch', type=int, default=0)
parser.add_argument('--max_step', type=int, default=None)
parser.add_argument('--val_freq', type=int, default=1)
parser.add_argument('--log_freq', type=int, default=20)
parser.add_argument('--checkpoint_freq', type=int, default=1000)
parser.add_argument('--cudnn_enabled', default=True, type=utils.str2bool)
parser.add_argument('--cudnn_benchmark', default=True, type=utils.str2bool)
parser.add_argument('--clip_gradient', type=int, default=20)
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('-bm', '--basemodel_name', type=str, default='resnet3d50')
parser.add_argument('--prefix', type=str, default='')
parser.add_argument('--return_metric', type=str, default='top1@abstr')
args = parser.parse_args()
return args
def get_model(model_name, dataset_name, scales=4, basemodel='resnet3d50'):
feature_dim = {'resnet3d50': 2048}.get(basemodel, 2048)
model_dict = {
'AbstractionEmbeddingModule': {
'func': models.AbstractionEmbeddingModule,
'in_features': feature_dim,
'out_features': feature_dim,
'num_nodes': NUM_NODES[dataset_name],
'embedding_dim': 300,
'bottleneck_dim': 512,
'scales': scales,
},
}.get(model_name)
model_func = model_dict.pop('func')
return model_func(**model_dict)
def get_criterion(names=['CE', 'MSE'], cuda=True):
criterions = {name: CRITERIONS[name]['func']() for name in names}
if cuda:
criterions = {name: crit.cuda() for name, crit in criterions.items()}
return criterions
def get_optimizer(model, optimizer_name, lr=0.001):
optim_dict = OPTIMIZERS[optimizer_name]
optim_func = optim_dict.pop('func', torch.optim.Adam)
optimizer = optim_func(model.parameters(), **{**optim_dict, 'lr': lr})
return optimizer
def get_scheduler(optimizer, scheduler_name='CosineAnnealingLR', **kwargs):
sched_func = getattr(torch.optim.lr_scheduler, scheduler_name)
func_kwargs, _ = utils.split_kwargs_by_func(sched_func, kwargs)
sched_kwargs = {**SCHEDULER_DEFAULTS.get(scheduler_name, {}), **func_kwargs}
scheduler = sched_func(optimizer, **sched_kwargs)
return scheduler
def get_logger(args):
logger_func = getattr(logger, args.logger_name)
logger_dict, _ = utils.split_kwargs_by_func(logger_func, vars(args).copy())
return logger_func(**logger_dict)
| 32.956522 | 86 | 0.650836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,540 | 0.372325 |
a0dac9d01fbc63e4052a6ea761aeaa779debac1b | 2,021 | py | Python | Spider/SpiderLab/lab3/lab3/spiders/spider_msg.py | JimouChen/python-application | b7b16506a17e2c304d1c5fabd6385e96be211c56 | [
"Apache-2.0"
]
| 1 | 2020-08-09T12:47:27.000Z | 2020-08-09T12:47:27.000Z | Spider/SpiderLab/lab3/lab3/spiders/spider_msg.py | JimouChen/Python_Application | b7b16506a17e2c304d1c5fabd6385e96be211c56 | [
"Apache-2.0"
]
| null | null | null | Spider/SpiderLab/lab3/lab3/spiders/spider_msg.py | JimouChen/Python_Application | b7b16506a17e2c304d1c5fabd6385e96be211c56 | [
"Apache-2.0"
]
| null | null | null | import scrapy
from bs4 import BeautifulSoup
from lab3.items import Lab3Item
class QuoteSpider(scrapy.Spider):
name = 'quotes'
start_urls = ['http://quotes.toscrape.com/page/1/']
page_num = 1
# 对爬取到的信息进行解析
def parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
nodes = soup.find_all('div', {'class': 'quote'})
for node in nodes:
text = node.find('span', {'class': 'text'}).text
author = node.find('small', {'class': 'author'}).text
tags = node.find_all('a', {'class': 'tag'})
tags_list = []
for tag in tags:
tags_list.append(tag.text)
# 接下来找作者链接,进去爬取里面的信息
author_link = 'http://quotes.toscrape.com/' + node.find_all('span')[1].a['href']
# 抛给author_parse进行处理
yield response.follow(author_link, self.author_parse)
# print('{0:<4}:{1:<20} said:{2:<20}\n{3}'.format(self.page_num, author, text, tags_list))
item = Lab3Item(author=author, text=text, tags=tags_list)
yield item
print('=' * 80 + 'page:',self.page_num,'saved successfully!' + '=' * 80)
# 下面爬取下一页的链接
try:
self.page_num += 1
url = soup.find('li', {'class': 'next'}).a['href']
if url:
next_link = 'http://quotes.toscrape.com/' + url
yield scrapy.Request(next_link, callback=self.parse)
except Exception:
print('所有页面信息爬取结束!!!')
def author_parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
author_name = soup.find_all('div', {'class': 'author-details'})[0].find('h3').text
birthday = soup.find('span').text
bio = soup.find('div', {'class': 'author-description'}).text
# print('{}: {}\n{}\n{}\n'.format(self.page_num, author_name, birthday, bio))
item = Lab3Item(name=author_name, birthday=birthday, bio=bio)
yield item
| 40.42 | 102 | 0.568036 | 2,058 | 0.963032 | 1,880 | 0.879738 | 0 | 0 | 0 | 0 | 705 | 0.329902 |
a0db51a733ae0c8c54da89e34dba10cbd38f7150 | 1,236 | py | Python | Aditya/Parametric_Models/WeiExpLog.py | cipheraxat/Survival-Analysis | fb7ecbe4a61fc72785a4327c86e0f81a58c5b3df | [
"Apache-2.0"
]
| 7 | 2020-06-14T20:43:55.000Z | 2020-06-23T06:07:08.000Z | Aditya/Parametric_Models/WeiExpLog.py | Abhijit2505/Survival-Analysis | 94c0c386aacfe03a9f2f018511236292f36c4ed9 | [
"Apache-2.0"
]
| 14 | 2020-06-20T06:28:50.000Z | 2020-09-08T15:54:29.000Z | Aditya/Parametric_Models/WeiExpLog.py | Abhijit2505/Survival-Analysis | 94c0c386aacfe03a9f2f018511236292f36c4ed9 | [
"Apache-2.0"
]
| 9 | 2020-06-19T03:50:21.000Z | 2021-05-10T18:19:26.000Z | import matplotlib.pyplot as plt
from lifelines import (WeibullFitter, ExponentialFitter,
LogNormalFitter, LogLogisticFitter)
import pandas as pd
data = pd.read_csv('Dataset/telco_customer.csv')
data['tenure'] = pd.to_numeric(data['tenure'])
data = data[data['tenure'] > 0]
# Replace yes and No in the Churn column to 1 and 0. 1 for the event and 0 for the censured data.
data['Churn'] = data['Churn'].apply(lambda x: 1 if x == 'Yes' else 0)
fig, axes = plt.subplots(2, 2, figsize=(
16, 12))
T = data['tenure']
E = data['Churn']
wbf = WeibullFitter().fit(T, E, label='WeibullFitter')
ef = ExponentialFitter().fit(T, E, label='ExponentialFitter')
lnf = LogNormalFitter().fit(T, E, label='LogNormalFitter')
llf = LogLogisticFitter().fit(T, E, label='LogLogisticFitter')
wbf.plot_cumulative_hazard(ax=axes[0][0])
ef.plot_cumulative_hazard(ax=axes[0][1])
lnf.plot_cumulative_hazard(ax=axes[1][0])
llf.plot_cumulative_hazard(ax=axes[1][1])
plt.suptitle(
'Parametric Model Implementation of cumulative hazard function on the Telco dataset')
fig.text(0.5, 0.04, 'Timeline', ha='center')
fig.text(0.04, 0.5, 'Probability', va='center', rotation='vertical')
plt.savefig('Images/WeiExpLogx.jpeg')
plt.show()
| 34.333333 | 97 | 0.711974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 410 | 0.331715 |
a0de95c4112c071280835a86de6b15a92fec2e83 | 2,260 | py | Python | spoteno/steps/numbers.py | Z-80/spoteno | 5d2ae7da437cfd8f9cf351b9602269c115dcd46f | [
"MIT"
]
| 2 | 2020-01-16T10:23:05.000Z | 2021-11-17T15:44:29.000Z | spoteno/steps/numbers.py | Z-80/spoteno | 5d2ae7da437cfd8f9cf351b9602269c115dcd46f | [
"MIT"
]
| null | null | null | spoteno/steps/numbers.py | Z-80/spoteno | 5d2ae7da437cfd8f9cf351b9602269c115dcd46f | [
"MIT"
]
| 2 | 2021-03-25T12:06:36.000Z | 2021-11-17T15:44:30.000Z | import re
import num2words
INT_PATTERN = re.compile(r'^-?[0-9]+$')
FLOAT_PATTERN = re.compile(r'^-?[0-9]+[,\.][0-9]+$')
ORDINAL_PATTERN = re.compile(r'^[0-9]+\.?$')
NUM_PATTERN = re.compile(r'^-?[0-9]+([,\.][0-9]+$)?')
class NumberToWords:
def __init__(self, lang_code):
self.lang_code = lang_code
def run(self, token):
float_match = FLOAT_PATTERN.match(token)
if float_match is not None:
out = []
if token.startswith('-'):
out.append('minus')
token = token[1:]
num_word = num2words.num2words(
float(token.replace(',', '.')),
lang=self.lang_code
).lower()
out.extend(num_word.split(' '))
return out
int_match = INT_PATTERN.match(token)
if int_match is not None:
out = []
if token.startswith('-'):
out.append('minus')
token = token[1:]
num_word = num2words.num2words(
int(token.replace(',', '.')),
lang=self.lang_code
).lower()
out.extend(num_word.split(' '))
return out
return [token]
class OrdinalNumberToWords:
def __init__(self, lang_code):
self.lang_code = lang_code
def run(self, token):
match = ORDINAL_PATTERN.match(token)
if match is not None:
num_word = num2words.num2words(
int(token[:-1]),
lang=self.lang_code,
to='ordinal'
).lower()
return num_word.split(' ')
return [token]
class SplitNumberSuffix:
"""
If any of the given strings is directly connected to
a number it is separated.
"2000%" -> "2000" "%"
But not "2000%ff"
"""
def __init__(self, suffixes):
self.suffixes = sorted(suffixes, reverse=True)
def run(self, token):
for s in self.suffixes:
if token.endswith(s):
should_be_number = token[:-len(s)]
match = NUM_PATTERN.match(should_be_number)
if match is not None:
return [token[:-len(s)], token[-len(s):]]
return [token]
| 23.541667 | 61 | 0.511504 | 2,029 | 0.897788 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.121681 |
a0e1d41f3732cef98c2895b100facec425069d9c | 4,252 | py | Python | src/django_website/django_website/tests/test_views.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
]
| 2 | 2019-09-23T18:42:32.000Z | 2019-09-27T00:33:38.000Z | src/django_website/django_website/tests/test_views.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
]
| 6 | 2021-03-19T03:25:33.000Z | 2022-02-10T08:48:14.000Z | src/django_website/django_website/tests/test_views.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
]
| 6 | 2019-09-23T18:53:41.000Z | 2020-02-06T00:20:06.000Z | from django.test import TransactionTestCase
from django.test import TestCase
from django.urls import reverse
from home_page.models import Search
from ebaysdk.finding import Connection as finding
class PageTest(TransactionTestCase):
def test_home_page_status_code_1(self):
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
def test_home_page_status_code_2(self):
response = self.client.get('/home/')
self.assertEquals(response.status_code, 200)
def test_home_page_view_url_by_name(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
def test_home_page_contains_correct_html(self):
response = self.client.get('/')
self.assertContains(response, 'Please enter key search words below:')
def test_home_page_does_not_contain_incorrect_html(self):
response = self.client.get('/')
self.assertNotContains(
response, 'Hi there! I should not be on the page.')
def test_graphs_page_status_code(self):
response = self.client.get('/graphs/')
self.assertEquals(response.status_code, 200)
def test_graphs_page_view_url_by_name(self):
response = self.client.get(reverse('graphs'))
self.assertEquals(response.status_code, 200)
def test_contact_page_status_code(self):
response = self.client.get('/contact/')
self.assertEquals(response.status_code, 200)
def test_contact_page_view_url_by_name(self):
response = self.client.get(reverse('contact'))
self.assertEquals(response.status_code, 200)
def test_about_page_status_code(self):
response = self.client.get('/about/')
self.assertEquals(response.status_code, 200)
def test_about_page_view_url_by_name(self):
response = self.client.get(reverse('about'))
self.assertEquals(response.status_code, 200)
def test_about_page_contains_correct_html(self):
response = self.client.get('/about/')
self.assertContains(response, 'ordo_ab_chao team members:')
def test_about_page_does_not_contain_incorrect_html(self):
response = self.client.get('/about/')
self.assertNotContains(
response, 'Hi there! I should not be on the page.')
def test_blog_page_status_code(self):
response = self.client.get('/blog/')
self.assertEquals(response.status_code, 200)
def test_blog_page_view_url_by_name(self):
response = self.client.get(reverse('blog'))
self.assertEquals(response.status_code, 200)
def test_aboutWebsite_page_status_code(self):
response = self.client.get('/about website/')
self.assertEquals(response.status_code, 200)
def test_aboutWebsite_page_view_url_by_name(self):
response = self.client.get(reverse('about website'))
self.assertEquals(response.status_code, 200)
def test_directions_page_status_code(self):
response = self.client.get('/directions/')
self.assertEquals(response.status_code, 200)
def test_directions_page_view_url_by_name(self):
response = self.client.get(reverse('directions'))
self.assertEquals(response.status_code, 200)
class SearchModelTest(TestCase):
def test_keywords_respresentation(self):
keywords1 = Search(search="1986 Fleer Jordan")
keywords2 = Search(search=1986)
self.assertEquals(str(keywords1), keywords1.search)
self.assertNotEquals(keywords2, keywords2.search)
class TestEbayAPI(TestCase):
def test_ebay_api_request_status_code(self):
api = finding(appid='JohnHein-homepage-PRD-392e94856-07aba7fe', config_file=None, siteid='EBAY-US')
keywords = Search(search="1986 Fleer Jordan PSA 10")
api_request = {'keywords':keywords, 'itemFilter':[{'name':'SoldItemsOnly', 'value':True},]}
response = api.execute('findCompletedItems', api_request)
self.assertEqual(response.status_code, 200)
| 38.654545 | 107 | 0.670508 | 3,972 | 0.934149 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.112653 |
a0e28476be0fa65ebedd554ed275a8386f751e73 | 869 | py | Python | tests/string/generate_string.py | om719/Bloom-Filter-CPP | 8093448b3ea357831b6de25aee9e0e7271b762fa | [
"MIT"
]
| 3 | 2021-05-31T18:41:34.000Z | 2021-06-01T04:44:15.000Z | tests/string/generate_string.py | om719/Bloom-Filter-CPP | 8093448b3ea357831b6de25aee9e0e7271b762fa | [
"MIT"
]
| null | null | null | tests/string/generate_string.py | om719/Bloom-Filter-CPP | 8093448b3ea357831b6de25aee9e0e7271b762fa | [
"MIT"
]
| 2 | 2021-05-31T18:41:48.000Z | 2021-05-31T18:47:14.000Z | from key_generator.key_generator import generate
all_sizes_required = [(100, '100'), (500, '500'), (1000, '1K'), (5000, '5K'), (10000, '10K'), (50000, '50K'), (100000, '100K'), (500000, '500K')]
for file_size in all_sizes_required:
OUTPUT_PATH = "./string_test_" + file_size[1] + ".txt"
STRING_COUNT = file_size[0]
output_file = open(OUTPUT_PATH, "w")
for i in range(STRING_COUNT):
string = ""
recipient = generate(
num_of_atom = 1,
type_of_value = "hex",
capital = "mix",
extras = ["-", "_"],
seed = i
).get_key()
domain = generate(
num_of_atom = 2,
separator = ".",
min_atom_len = 3,
max_atom_len = 5,
type_of_value = "hex",
capital = "mix",
extras = ["-"],
seed = i
).get_key()
string = recipient + "@" + domain
output_file.write(string + "\n")
output_file.close()
print("Done with " + OUTPUT_PATH)
| 22.868421 | 145 | 0.611047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.135788 |
a0e444f5e01631d54753ab517309246502cc9089 | 4,950 | py | Python | resources/portfolio_book.py | basgir/bibliotek | 42456ced804a2c9570227b393de662847283c76f | [
"MIT"
]
| null | null | null | resources/portfolio_book.py | basgir/bibliotek | 42456ced804a2c9570227b393de662847283c76f | [
"MIT"
]
| null | null | null | resources/portfolio_book.py | basgir/bibliotek | 42456ced804a2c9570227b393de662847283c76f | [
"MIT"
]
| null | null | null | ###########################################
# Author : Bastien Girardet, Deborah De Wolff
# Date : 13.05.2018
# Course : Applications in Object-oriented Programming and Databases
# Teachers : Binswanger Johannes, Zürcher Ruben
# Project : Bibliotek
# Name : portfolio_book.py Portfolio_book Flask_restful resource
# #########################################
from flask_restful import Resource, reqparse
from models.portfolio_book import PortfolioBookModel
from models.book import BookModel
class PortfolioBook(Resource):
"""PortfolioBook. Resource that helps with dealing with Http request for a portfolio_book provided an id.
HTTP GET call : /portfolios/<int:portfolioId>/books
HTTP DELETE call : /portfolios/<int:portfolioId>/books
"""
# we parse the args
parser = reqparse.RequestParser()
# The parser require some arguments that ifnot fulfilled, return an error
parser.add_argument('bookId',
type=int,
required=True,
help="Each relation does have a book id"
)
parser.add_argument('portfolioId',
type=int,
required=True,
help="Each relation does have a portfolio id"
)
def get(self, portfolioId):
"""GET request that deals with requests that look for a portfolio book relation given a portfolioId"""
# Call the model to find the portfolio book relations that has a specific portfolio Id
portfolio_book = PortfolioBookModel.find_by_portfolio_id(portfolioId)
# If found
if portfolio_book:
# We return the list of relations as json
return {'Portfolio Book of Portfolio {}'.format(portfolioId): list(map(lambda x: x.json(), portfolio_book))}, 201
else:
# If not found we return an error
return {'message': 'This portofolio does not exist or does not have any book in the portfolio'}, 404
def delete(self, portfolioId):
"""DELETE request that deals with the deletion of all relations that belongs to a portfolioId"""
# Call the model to find all entries that have a certain portfolioId
portfolio_book = PortfolioBookModel.find_by_portfolio_id(portfolioId)
# if found
if portfolio_book:
# we delete
portfolio_book.delete_from_db()
return {"Portfolio relations deleted"}, 201
else:
# Else error
return {'message': 'This Portfolio relations does not exist or does not have any book in the portfolio'}, 404
class PortfolioBookList(Resource):
"""Portfoliobook. Resource that deals with requests that insert new portfolio _ book relations into the database.
HTTP GET call : /portoflio/books
"""
def get(self):
"""GET request that returns the list of all the portfolio book relations"""
# return all as json
return {'Portfolio Books': list(map(lambda x: x.json(), PortfolioBookModel.query.all()))},200
class PortfolioBookEdit(Resource):
"""Book. Resource that helps with dealing with Http request that create or delete portfolio book relations provided a portfolioId and bookId.
HTTP POST call : /portfolios/<int:portfolioId>/books/<int:bookId>
HTTP DELETE call : /portfolios/<int:portfolioId>/books/<int:bookId>
"""
def post(self, portfolioId, bookId):
"""POST request create a portfolio_book relation provided a portfolioId and a bookId"""
relation = PortfolioBookModel.does_this_relation_exists(portfolioId, bookId)
# Check if the relation already exists
if relation:
return {"message": "The relation already exists"}, 500
else:
try:
# Call the model by providing the two arguments
relation = PortfolioBookModel(bookId,portfolioId)
# Save and commit
relation.save_to_db()
except:
return {"message": "An error occurred inserting the relation portfolio_book. Check whether the book or the portofolio do exist"}, 500
# return the json
return relation.json(), 201
def delete(self, portfolioId, bookId):
"""DELETE request that delete a portfolio_book relation provided a portfolioId and a bookId"""
# Fetch the relation
relation = PortfolioBookModel.find_by_portfolio_and_book(portfolioId, bookId)
# if exists
if relation:
try:
# we delete it
relation.delete_from_db()
return {'message': 'Relation deleted'}
except:
return {'message': 'Error while deleting the relation.'}
else:
# if not found
return {'message' : 'Relation not found'}, 404
| 40.57377 | 149 | 0.625051 | 4,442 | 0.897192 | 0 | 0 | 0 | 0 | 0 | 0 | 2,676 | 0.540497 |
a0e4dae891748b8a01307ae7aac7bc7715d4cc4e | 9,199 | py | Python | examples/the-feeling-of-success/run_experiments.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | [
"Apache-2.0"
]
| null | null | null | examples/the-feeling-of-success/run_experiments.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | [
"Apache-2.0"
]
| null | null | null | examples/the-feeling-of-success/run_experiments.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | [
"Apache-2.0"
]
| null | null | null | import logging
from absl import app
from sensor_msgs.msg import Image
from insert_table_op import InsertTableOperator
from insert_block_op import InsertBlockOperator
from init_robot_op import InitRobotOperator
from gel_sight_op import GelSightOperator
from mock_loc_obj_op import MockLocateObjectOperator
from goto_xyz_op import GoToXYZOperator
from move_above_object_op import MoveAboveObjectOperator
from mock_gripper_op import MockGripperOperator
from mock_grasp_object_op import MockGraspObjectOperator
from raise_object_op import RaiseObjectOperator
from mock_predict_grip_op import MockPredictGripOperator
from random_position_op import RandomPositionOperator
from mock_ungrasp_object_op import MockUngraspObjectOperator
import erdos.graph
from erdos.ros.ros_subscriber_op import ROSSubscriberOp
logger = logging.getLogger(__name__)
table_init_arguments = {"_x": 0.75, "_y": 0.0, "_z": 0.0, "ref_frame": "world"}
block_init_arguments = {
"_x": 0.4225,
"_y": 0.1265,
"_z": 0.7725,
"ref_frame": "world"
}
robot_init_arguments = {
"joint_angles": {
'right_j0': -0.041662954890248294,
'right_j1': -1.0258291091425074,
'right_j2': 0.0293680414401436,
'right_j3': 2.17518162913313,
'right_j4': -0.06703022873354225,
'right_j5': 0.3968371433926965,
'right_j6': 1.7659649178699421
},
"limb_name": "right"
}
def construct_graph(graph):
logger.info("Starting the construction of the graph.")
# First, insert the table in the world.
insert_table_op = graph.add(
InsertTableOperator, init_args=table_init_arguments)
# Now, insert the block in the world.
insert_block_op = graph.add(
InsertBlockOperator, init_args=block_init_arguments)
graph.connect([insert_table_op], [insert_block_op])
# Initialize the robot and move it to the rest position.
init_robot_op = graph.add(
InitRobotOperator, init_args=robot_init_arguments)
graph.connect([insert_block_op], [init_robot_op])
# Initialize the gelsight operators and connect them to the rostopics.
gel_sight_topics = [("/gelsightA/image_raw", Image, "gelsightA"),
("/gelsightB/image_raw", Image, "gelsightB")]
ros_gel_sight_op = graph.add(
ROSSubscriberOp,
name='ros_gel_sight',
init_args={'ros_topics_type': gel_sight_topics},
setup_args={'ros_topics_type': gel_sight_topics})
gel_sight_a = graph.add(
GelSightOperator,
name="gelsight-a-op",
init_args={'output_name': "gelsight-stream-a"},
setup_args={
'input_name': "gelsightA",
'output_name': "gelsight-stream-a"
})
gel_sight_b = graph.add(
GelSightOperator,
name="gelsight-b-op",
init_args={'output_name': "gelsight-stream-b"},
setup_args={
'input_name': "gelsightB",
'output_name': "gelsight-stream-b"
})
graph.connect([ros_gel_sight_op], [gel_sight_a])
graph.connect([ros_gel_sight_op], [gel_sight_b])
# Retrieve the kinect images from the rostopics and feed them to the
# object locator.
ros_kinect_topics = [("/kinectA/image_raw", Image, "image-stream"),
("/kinectA/depth_raw", Image, "depth-stream")]
ros_kinect_op = graph.add(
ROSSubscriberOp,
name='ros_kinect',
init_args={'ros_topics_type': ros_kinect_topics},
setup_args={'ros_topics_type': ros_kinect_topics})
locate_object_op = graph.add(
MockLocateObjectOperator,
name='locate-object-op',
init_args={
'image_stream_name': 'image-stream',
'depth_stream_name': 'depth-stream',
'trigger_stream_name': InitRobotOperator.stream_name
},
setup_args={
'image_stream_name': 'image-stream',
'depth_stream_name': 'depth-stream',
'trigger_stream_name': InitRobotOperator.stream_name
})
graph.connect([ros_kinect_op, init_robot_op], [locate_object_op])
# Move the Sawyer arm above the detected object.
goto_xyz_move_above_op = graph.add(
GoToXYZOperator,
name='goto-xyz',
init_args={
'limb_name': 'right',
'output_stream_name': 'goto-move-above'
},
setup_args={
'input_stream_name': MoveAboveObjectOperator.goto_stream_name,
'output_stream_name': 'goto-move-above'
})
move_above_object_op = graph.add(
MoveAboveObjectOperator,
name='controller',
setup_args={
'trigger_stream_name': MockLocateObjectOperator.stream_name,
'goto_xyz_stream_name': 'goto-move-above'
})
graph.connect([locate_object_op, goto_xyz_move_above_op],
[move_above_object_op])
graph.connect([move_above_object_op], [goto_xyz_move_above_op])
# Closes the gripper.
gripper_close_op = graph.add(
MockGripperOperator,
name="gripper-close-op",
init_args={
'gripper_speed': 0.25,
'output_stream_name': 'gripper_close_stream'
},
setup_args={
'gripper_stream': MockGraspObjectOperator.gripper_stream,
'output_stream_name': 'gripper_close_stream'
})
grasp_object_op = graph.add(
MockGraspObjectOperator,
name='mock-grasp-object',
setup_args={
'trigger_stream_name': MoveAboveObjectOperator.stream_name,
'gripper_stream_name': 'gripper_close_stream'
})
graph.connect([move_above_object_op, gripper_close_op], [grasp_object_op])
graph.connect([grasp_object_op], [gripper_close_op])
# Raises the object.
raise_object_op = graph.add(
RaiseObjectOperator,
name='raise-object',
setup_args={
'location_stream_name': MockLocateObjectOperator.stream_name,
'trigger_stream_name': MockGraspObjectOperator.
action_complete_stream_name
})
goto_xyz_raise_op = graph.add(
GoToXYZOperator,
name="goto-xyz-raise",
init_args={
'limb_name': 'right',
'output_stream_name': 'goto_xyz_raise'
},
setup_args={
'input_stream_name': RaiseObjectOperator.stream_name,
'output_stream_name': 'goto_xyz_raise'
})
graph.connect([locate_object_op, grasp_object_op], [raise_object_op])
graph.connect([raise_object_op], [goto_xyz_raise_op])
# Predicts whether the grip was successful using the gelsight cameras.
predict_grip_op = graph.add(
MockPredictGripOperator,
name='predict-grip-op',
setup_args={
'gel_sight_a_stream_name': 'gelsight-stream-a',
'gel_sight_b_stream_name': 'gelsight-stream-b',
'trigger_stream_name': 'goto_xyz_raise'
})
graph.connect([gel_sight_a, gel_sight_b, goto_xyz_raise_op],
[predict_grip_op])
# If the grip is successful, we return it to a random location.
random_position_op = graph.add(
RandomPositionOperator,
name="random-pos-op",
setup_args={
'locate_object_stream_name': MockLocateObjectOperator.stream_name,
'trigger_stream_name': MockPredictGripOperator.success_stream_name,
'goto_xyz_stream_name': 'goto_random_pos'
})
goto_xyz_random_op = graph.add(
GoToXYZOperator,
name="goto-xyz-random",
init_args={
'limb_name': 'right',
'output_stream_name': 'goto_random_pos'
},
setup_args={
'input_stream_name': RandomPositionOperator.position_stream_name,
'output_stream_name': 'goto_random_pos'
})
graph.connect([locate_object_op, predict_grip_op, goto_xyz_random_op],
[random_position_op])
graph.connect([random_position_op], [goto_xyz_random_op])
# Now, ungrasp the object.
gripper_open_op = graph.add(
MockGripperOperator,
name="gripper-open-op",
init_args={
'gripper_speed': 0.25,
'output_stream_name': 'gripper_open_stream'
},
setup_args={
'gripper_stream': MockUngraspObjectOperator.gripper_stream,
'output_stream_name': 'gripper_open_stream'
})
ungrasp_object_op = graph.add(
MockUngraspObjectOperator,
name = "ungrasp-object-op",
setup_args = {
'trigger_stream_name': RandomPositionOperator.\
action_complete_stream_name,
'gripper_stream_name': 'gripper_open_stream'
})
graph.connect([random_position_op, gripper_open_op], [ungrasp_object_op])
graph.connect([ungrasp_object_op], [gripper_open_op])
logger.info("Finished constructing the execution graph!")
def main(argv):
# Create the graph.
graph = erdos.graph.get_current_graph()
construct_graph(graph)
# Execute the graph.
graph.execute("ros")
try:
while True:
pass
except KeyboardInterrupt:
pass
if __name__ == "__main__":
app.run(main)
| 35.245211 | 79 | 0.655941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,636 | 0.286553 |
a0e5feb7c20a84c78be8423f81add0bb2c5c4589 | 2,686 | py | Python | junction/tickets/migrations/0001_initial.py | theSage21/junction | ac713edcf56c41eb3f066da776a0a5d24e55b46a | [
"MIT"
]
| 192 | 2015-01-12T06:21:24.000Z | 2022-03-10T09:57:37.000Z | junction/tickets/migrations/0001_initial.py | theSage21/junction | ac713edcf56c41eb3f066da776a0a5d24e55b46a | [
"MIT"
]
| 621 | 2015-01-01T09:19:17.000Z | 2021-05-28T09:27:35.000Z | junction/tickets/migrations/0001_initial.py | theSage21/junction | ac713edcf56c41eb3f066da776a0a5d24e55b46a | [
"MIT"
]
| 207 | 2015-01-05T16:39:06.000Z | 2022-02-15T13:18:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import jsonfield.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Ticket",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
),
(
"modified_at",
models.DateTimeField(
auto_now=True, verbose_name="Last Modified At"
),
),
("order_no", models.CharField(max_length=255)),
("order_cost", models.FloatField()),
("ticket_no", models.CharField(max_length=255)),
("name", models.CharField(max_length=255)),
("email", models.EmailField(max_length=75)),
("city", models.CharField(max_length=255, null=True, blank=True)),
("zipcode", models.IntegerField(null=True, blank=True)),
("address", models.CharField(max_length=255, null=True, blank=True)),
("status", models.CharField(max_length=255)),
("others", jsonfield.fields.JSONField()),
(
"created_by",
models.ForeignKey(
related_name="created_ticket_set",
verbose_name="Created By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
(
"modified_by",
models.ForeignKey(
related_name="updated_ticket_set",
verbose_name="Modified By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
],
options={"abstract": False},
bases=(models.Model,),
),
]
| 35.813333 | 87 | 0.44341 | 2,520 | 0.938198 | 0 | 0 | 0 | 0 | 0 | 0 | 280 | 0.104244 |
a0e63766143621d523ba6066faa521d14ec9c390 | 1,300 | py | Python | src/bin/calc_stats.py | sw005320/PytorchWaveNetVocoder | b92d7af7d5f2794291e0d462694c0719f75ca469 | [
"Apache-2.0"
]
| 1 | 2021-01-18T06:22:30.000Z | 2021-01-18T06:22:30.000Z | src/bin/calc_stats.py | sw005320/PytorchWaveNetVocoder | b92d7af7d5f2794291e0d462694c0719f75ca469 | [
"Apache-2.0"
]
| null | null | null | src/bin/calc_stats.py | sw005320/PytorchWaveNetVocoder | b92d7af7d5f2794291e0d462694c0719f75ca469 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import print_function
import argparse
import numpy as np
from sklearn.preprocessing import StandardScaler
from utils import read_hdf5
from utils import read_txt
from utils import write_hdf5
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--feats", default=None, required=True,
help="name of the list of hdf5 files")
parser.add_argument(
"--stats", default=None, required=True,
help="filename of hdf5 format")
args = parser.parse_args()
# read list and define scaler
filenames = read_txt(args.feats)
scaler = StandardScaler()
print("number of training utterances =", len(filenames))
# process over all of data
for filename in filenames:
feat = read_hdf5(filename, "/feat_org")
scaler.partial_fit(feat[:, 1:])
# add uv term
mean = np.zeros((feat.shape[1]))
scale = np.ones((feat.shape[1]))
mean[1:] = scaler.mean_
scale[1:] = scaler.scale_
# write to hdf5
write_hdf5(args.stats, "/mean", mean)
write_hdf5(args.stats, "/scale", scale)
if __name__ == "__main__":
main()
| 24.074074 | 60 | 0.665385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.293077 |
a0e69b24115a09b931149b369f1062a566ff2b61 | 727 | py | Python | python/p002.py | RUiNtheExtinct/project-euler | 5c3e64c7dfcbf52d5213df88d2310550f4ee9ce1 | [
"MIT"
]
| null | null | null | python/p002.py | RUiNtheExtinct/project-euler | 5c3e64c7dfcbf52d5213df88d2310550f4ee9ce1 | [
"MIT"
]
| null | null | null | python/p002.py | RUiNtheExtinct/project-euler | 5c3e64c7dfcbf52d5213df88d2310550f4ee9ce1 | [
"MIT"
]
| null | null | null | # from decimal import Decimal
import collections as coll
import sys
import math as mt
# import random as rd
# import bisect as bi
import time
sys.setrecursionlimit(1000000)
# import numpy as np
def uno():
return int(sys.stdin.readline().strip())
def dos():
return sys.stdin.readline().strip()
def tres():
return map(int, sys.stdin.readline().strip().split())
def cuatro():
return sys.stdin.readline().strip().split()
# Starting Time
time1 = time.time()
######## CODE STARTS FROM HERE ########
n = uno()
a, b, c, ans = 0, 1, 0, 0
while c <= n:
c = a + b
if ~c & 1:
ans += c
b, a = c, b
print(ans)
# End Time
time2 = time.time()
print("\nTime Taken:", (time2 - time1) * 1000)
| 14.836735 | 57 | 0.612105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.233838 |
a0e7af4439dc68e76e3dc02f0c28bddc41d0fe5c | 7,662 | py | Python | robosuite/models/objects/xml_objects.py | ClaireLC/robosuite | b5c37f1110aefc02106ffd2aed0dfb106bc1bb33 | [
"MIT"
]
| 1 | 2021-12-22T13:10:46.000Z | 2021-12-22T13:10:46.000Z | robosuite/models/objects/xml_objects.py | wangcongrobot/robosuite-jr | 738be7a3a83447e78763f6a082faafc8b479c95d | [
"MIT"
]
| null | null | null | robosuite/models/objects/xml_objects.py | wangcongrobot/robosuite-jr | 738be7a3a83447e78763f6a082faafc8b479c95d | [
"MIT"
]
| 1 | 2020-12-29T01:38:01.000Z | 2020-12-29T01:38:01.000Z | from robosuite.models.objects import MujocoXMLObject
from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string, string_to_array
class BottleObject(MujocoXMLObject):
"""
Bottle object
"""
def __init__(self):
super().__init__(xml_path_completion("objects/bottle.xml"))
class CanObject(MujocoXMLObject):
"""
Coke can object (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/can.xml"))
class LemonObject(MujocoXMLObject):
"""
Lemon object
"""
def __init__(self):
super().__init__(xml_path_completion("objects/lemon.xml"))
class MilkObject(MujocoXMLObject):
"""
Milk carton object (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/milk.xml"))
class BreadObject(MujocoXMLObject):
"""
Bread loaf object (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/bread.xml"))
class CerealObject(MujocoXMLObject):
"""
Cereal box object (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/cereal.xml"))
class SquareNutObject(MujocoXMLObject):
"""
Square nut object (used in SawyerNutAssembly)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/square-nut.xml"))
class RoundNutObject(MujocoXMLObject):
"""
Round nut (used in SawyerNutAssembly)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/round-nut.xml"))
class MilkVisualObject(MujocoXMLObject):
"""
Visual fiducial of milk carton (used in SawyerPickPlace).
Fiducial objects are not involved in collision physics.
They provide a point of reference to indicate a position.
"""
def __init__(self):
super().__init__(xml_path_completion("objects/milk-visual.xml"))
class BreadVisualObject(MujocoXMLObject):
"""
Visual fiducial of bread loaf (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/bread-visual.xml"))
class CerealVisualObject(MujocoXMLObject):
"""
Visual fiducial of cereal box (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/cereal-visual.xml"))
class CanVisualObject(MujocoXMLObject):
"""
Visual fiducial of coke can (used in SawyerPickPlace)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/can-visual.xml"))
class PlateWithHoleObject(MujocoXMLObject):
"""
Square plate with a hole in the center (used in BaxterPegInHole)
"""
def __init__(self):
super().__init__(xml_path_completion("objects/plate-with-hole.xml"))
class DoorPullNoLatchObject(MujocoXMLObject):
"""
Door: pull with no latch
"""
def __init__(self):
#super().__init__(xml_path_completion("objects/door_dapg.xml"))
super().__init__(xml_path_completion("objects/door_pull_no_latch.xml"))
def set_goal_xpos(self, x_delta, y_delta):
""" Sets x,y position of goal site in door model with x and y offset from door center"""
door_center_site = self.worldbody.find("./body/body/body/site[@name='door_center']")
door_center_pos = string_to_array(door_center_site.get("pos"))
goal_site = self.worldbody.find("./body/body/body/site[@name='goal']")
goal_site.set("pos", array_to_string([door_center_pos[0] + x_delta, door_center_pos[1] + y_delta, -1.0]))
@property
def handle_contact_geoms(self):
return[
"handle_base",
"handle",
]
@property
def door_contact_geoms(self):
return[
"door_box",
"door_r_cyl",
"door_l_cyl",
"l_frame",
"r_frame",
]
class DoorPullWithLatchObject(MujocoXMLObject):
"""
Door: pull with latch
"""
def __init__(self):
#super().__init__(xml_path_completion("objects/door_dapg.xml"))
super().__init__(xml_path_completion("objects/door_pull_with_latch.xml"))
def set_goal_xpos(self, x_delta, y_delta):
""" Sets x,y position of goal site in door model with x and y offset from door center"""
door_center_site = self.worldbody.find("./body/body/body/site[@name='door_center']")
door_center_pos = string_to_array(door_center_site.get("pos"))
goal_site = self.worldbody.find("./body/body/body/site[@name='goal']")
goal_site.set("pos", array_to_string([door_center_pos[0] + x_delta, door_center_pos[1] + y_delta, -1.0]))
@property
def handle_contact_geoms(self):
return[
"handle_base",
"handle",
]
@property
def door_contact_geoms(self):
return[
"door_box",
"door_r_cyl",
"door_l_cyl",
"l_frame",
"r_frame",
]
class DoorPullNoLatchRoomObject(MujocoXMLObject):
"""
Door: pull with latch with walls
"""
def __init__(self):
super().__init__(xml_path_completion("objects/door_pull_no_latch_room.xml"))
def set_goal_xpos(self, x_delta, y_delta):
""" Sets x,y position of goal site in door model with x and y offset from door center"""
door_center_site = self.worldbody.find("./body/body/body/site[@name='door_center']")
door_center_pos = string_to_array(door_center_site.get("pos"))
goal_site = self.worldbody.find("./body/body/body/site[@name='goal']")
goal_site.set("pos", array_to_string([door_center_pos[0] + x_delta, door_center_pos[1] + y_delta, -1.0]))
@property
def handle_contact_geoms(self):
return[
"handle_base",
"handle",
]
@property
def door_contact_geoms(self):
return[
"door_box",
"door_r_cyl",
"door_l_cyl",
"l_frame",
"r_frame",
]
@property
def wall_contact_geoms(self):
return[
"wall_g0",
"wall_g1",
"wall_g2",
"wall_g3",
]
class DoorPullNoLatchRoomWideObject(MujocoXMLObject):
"""
Door: pull with no latch with walls
"""
def __init__(self):
super().__init__(xml_path_completion("objects/door_pull_no_latch_room_wide.xml"))
def set_goal_xpos(self, x_delta, y_delta):
""" Sets x,y position of goal site in door model with x and y offset from door center"""
door_center_site = self.worldbody.find("./body/body/body/site[@name='door_center']")
door_center_pos = string_to_array(door_center_site.get("pos"))
goal_site = self.worldbody.find("./body/body/body/site[@name='goal']")
goal_site.set("pos", array_to_string([door_center_pos[0] + x_delta, door_center_pos[1] + y_delta, -1.0]))
@property
def handle_contact_geoms(self):
return[
"handle_base",
"handle",
]
@property
def door_contact_geoms(self):
return[
"door_box",
"door_r_cyl",
"door_l_cyl",
"l_frame",
"r_frame",
]
@property
def wall_contact_geoms(self):
return[
"wall_g0",
"wall_g1",
"wall_g2",
"wall_g3",
]
class EmptyWithGoalObject(MujocoXMLObject):
"""
Empty arena with goal site
"""
def __init__(self):
super().__init__(xml_path_completion("objects/empty_with_goal.xml"))
def set_goal_xpos(self, x_delta, y_delta):
""" Sets x,y position of goal site in door model with x and y offset from door center"""
goal_site = self.worldbody.find("./body/body/site[@name='goal']")
goal_site.set("pos", array_to_string([x_delta, y_delta, 0]))
| 26.512111 | 111 | 0.658053 | 7,467 | 0.97455 | 0 | 0 | 1,362 | 0.17776 | 0 | 0 | 2,891 | 0.377317 |
a0e9174ff5dee90055733752e0b8cd4f3423f64e | 1,654 | py | Python | SoftUni-Python-Programming-Course/Exam-Preparation/medicines_in_carton.py | vladislav-karamfilov/Python-Playground | ed83a693d37ff0c1565ece49d2a5d9ecd32c9aac | [
"MIT"
]
| 1 | 2019-04-07T23:10:27.000Z | 2019-04-07T23:10:27.000Z | SoftUni-Python-Programming-Course/Exam-Preparation/medicines_in_carton.py | vladislav-karamfilov/Python-Playground | ed83a693d37ff0c1565ece49d2a5d9ecd32c9aac | [
"MIT"
]
| null | null | null | SoftUni-Python-Programming-Course/Exam-Preparation/medicines_in_carton.py | vladislav-karamfilov/Python-Playground | ed83a693d37ff0c1565ece49d2a5d9ecd32c9aac | [
"MIT"
]
| null | null | null | # Problem description: http://python3.softuni.bg/student/lecture/assignment/56b749af7e4f59b649b7e626/
class Medicine:
def __init__(self, name, w, h, d):
self.name = name
self.w = w
self.h = h
self.d = d
def can_be_put_in_carton(self, carton_w, carton_h, carton_d):
sorted_medicine_dimensions = sorted([self.w, self.h, self.d])
sorted_carton_dimensions = sorted([carton_w, carton_h, carton_d])
return all(sorted_medicine_dimensions[d] <= sorted_carton_dimensions[d] for d in range(3))
def read_medicines(medicines_file_path):
result = []
with open(medicines_file_path, encoding='utf-8') as f:
for line in f:
if line:
medicine_info = line.split(',')
medicine_name = ''.join(medicine_info[:-3])
medicine_w = float(medicine_info[-3])
medicine_h = float(medicine_info[-2])
medicine_d = float(medicine_info[-1])
result.append(Medicine(medicine_name, medicine_w, medicine_h, medicine_d))
return result
def main():
try:
carton_w = float(input())
carton_h = float(input())
carton_d = float(input())
medicines_file_path = input()
medicines = read_medicines(medicines_file_path)
except:
print('INVALID INPUT')
return
medicines_that_can_be_put_in_carton = \
[medicine for medicine in medicines if medicine.can_be_put_in_carton(carton_w, carton_h, carton_d)]
for medicine in medicines_that_can_be_put_in_carton:
print(medicine.name)
if __name__ == '__main__':
main()
| 29.535714 | 107 | 0.638452 | 447 | 0.270254 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.083434 |
a0e9473241e626ba8085d5563079fd7bc9d6eeb6 | 1,111 | py | Python | var/app_template/views.py | michailbrynard/django-skeleton | 772cd579cad1b8853ed6f1a2c14cbacac2ba41da | [
"MIT"
]
| null | null | null | var/app_template/views.py | michailbrynard/django-skeleton | 772cd579cad1b8853ed6f1a2c14cbacac2ba41da | [
"MIT"
]
| null | null | null | var/app_template/views.py | michailbrynard/django-skeleton | 772cd579cad1b8853ed6f1a2c14cbacac2ba41da | [
"MIT"
]
| null | null | null | # LOGGING
# ---------------------------------------------------------------------------------------------------------------------#
import logging
logger = logging.getLogger('django')
# IMPORTS
# ---------------------------------------------------------------------------------------------------------------------#
# shortcuts
from django.shortcuts import render
# contrib.auth
from django.contrib.auth.decorators import login_required
# views.generic
from django.views.generic import DetailView
#
from .models import *
# GENERIC CLASS BASED VIEWS
# ---------------------------------------------------------------------------------------------------------------------#
class BasicModelView(DetailView):
model = BasicModel
# CUSTOM VIEWS
# ---------------------------------------------------------------------------------------------------------------------#
@login_required
def about(request):
# Context variables to pass on to template
context = {
'val1': 'Hello',
'val2': 'World!'
}
# Render the template
return render(request, 'app_name/about_page.html', context) | 32.676471 | 120 | 0.407741 | 56 | 0.050405 | 0 | 0 | 245 | 0.220522 | 0 | 0 | 705 | 0.634563 |
a0e9bc2b96c3d8a0da5092d2ce1abf89a56a046d | 858 | py | Python | circuitpy_examples/week1/04_ramp_LED_brightness.py | WSU-Physics/phys150 | 043ebf8212b56a988ef8e41a4464400bec5a7dc1 | [
"MIT"
]
| null | null | null | circuitpy_examples/week1/04_ramp_LED_brightness.py | WSU-Physics/phys150 | 043ebf8212b56a988ef8e41a4464400bec5a7dc1 | [
"MIT"
]
| null | null | null | circuitpy_examples/week1/04_ramp_LED_brightness.py | WSU-Physics/phys150 | 043ebf8212b56a988ef8e41a4464400bec5a7dc1 | [
"MIT"
]
| null | null | null | # Adam Beardsley
# starting from from adafruit example
# https://learn.adafruit.com/welcome-to-circuitpython/creating-and-editing-code
#
import board
import digitalio
import time
led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT
ramp_time = 3 # Time to ramp up, in seconds
period = 0.01 # Time per cycle, in seconds
step = period / ramp_time # how much to increment the brightness each cycle
while True:
brightness = 0 # Start off
while brightness < 1:
T_on = brightness * period
T_off = period - T_on
led.value = True
time.sleep(T_on)
led.value = False
time.sleep(T_off)
brightness += step
# Convince yourself the expression for step (line 14) is correct
# How can you *test* that step is correct?
# Can you reverse the program (start bright, get dim)
| 28.6 | 79 | 0.698135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.47669 |
a0ead277852aac4f9b24d58dbb1630e69b9f9cac | 1,099 | py | Python | __main__.py | Makeeyaf/SiteChecker | 969bdedd2d5df36220ff9fcc41e44cf1db0cca00 | [
"MIT"
]
| 1 | 2021-01-06T01:45:41.000Z | 2021-01-06T01:45:41.000Z | __main__.py | Makeeyaf/SiteChecker | 969bdedd2d5df36220ff9fcc41e44cf1db0cca00 | [
"MIT"
]
| 2 | 2021-01-03T13:25:39.000Z | 2021-01-03T15:57:01.000Z | __main__.py | Makeeyaf/SiteChecker | 969bdedd2d5df36220ff9fcc41e44cf1db0cca00 | [
"MIT"
]
| null | null | null | import argparse
from site_checker import SiteChecker
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Check sites text.")
parser.add_argument("config", type=str, nargs=1, help="Path to config json file.")
parser.add_argument(
"-a",
dest="apiKey",
type=str,
nargs=1,
required=True,
help="Pushbullet API key.",
)
parser.add_argument(
"-m", dest="maxFailCount", type=int, nargs=1, help="Max fail count."
)
parser.add_argument(
"-u", dest="updateCycle", type=int, nargs=1, help="Update cycle in second"
)
parser.add_argument(
"-v", dest="isVerbose", action="store_true", help="Verbose mode."
)
parser.add_argument(
"-q",
dest="isQuiet",
action="store_true",
help="Quiet mode. Does not call pushbullet",
)
args = parser.parse_args()
k = SiteChecker(
args.config[0],
args.apiKey[0],
args.isQuiet,
args.isVerbose,
args.maxFailCount,
args.updateCycle,
)
k.check()
| 26.166667 | 86 | 0.586897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.252957 |
a0eb34e703fb20df0982cbdc1702ff56c69d7bb6 | 1,563 | py | Python | autop-listener/autop-listener.py | yuriel-v/ansible | f6e8fcb1edfbef550da2fe217cfd84941523f692 | [
"MIT"
]
| null | null | null | autop-listener/autop-listener.py | yuriel-v/ansible | f6e8fcb1edfbef550da2fe217cfd84941523f692 | [
"MIT"
]
| null | null | null | autop-listener/autop-listener.py | yuriel-v/ansible | f6e8fcb1edfbef550da2fe217cfd84941523f692 | [
"MIT"
]
| null | null | null | import os
from pathlib import Path
from datetime import datetime
from json import dumps
import flask as fsk
from flask import request, jsonify, Response
app = fsk.Flask(__name__)
app.config['DEBUG'] = False
homedir = os.getenv('HOME')
@app.route('/provision', methods=['POST'])
def auto_provision():
Path(f'{homedir}/log/ansible').mkdir(parents=True, exist_ok=True)
req = request.get_json()
try:
vm_type = req.pop('type')
vm_ip = req.pop('ip')
if not isinstance(req['extras'], dict):
raise Exception("Invalid extras element type")
except Exception:
return Response('{"response": "Wrongly formatted request"}', 400)
req['extras']['global_vm_shortname'] = req['extras'].pop('desc')
req['extras']['global_vm_hostname'] = req['extras'].pop('name')
extra_vars = str(dumps(req['extras'])).replace('"', '\\"')
ansible_command = "tmux send-keys -t autopshell "
ansible_command += f"'ansible-playbook {homedir}/ansible/global.yml -i {vm_ip}, --tags \"{vm_type}\" --extra-vars \"{extra_vars}\" "
ansible_command += f"| tee {homedir}/log/ansible/{req['extras']['global_vm_hostname']}-{datetime.now().isoformat()}.log' C-m"
os.system(ansible_command)
return jsonify({'response': 'Ansible command fired'})
@app.route('/getkey', methods=['GET'])
def get_public_key():
with open(f'{homedir}/.ssh/ansible/id_ansible.pub', 'r') as pkfile:
return jsonify({'publickey': pkfile.readline().rstrip()})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=4960)
| 32.5625 | 136 | 0.658989 | 0 | 0 | 0 | 0 | 1,252 | 0.801024 | 0 | 0 | 616 | 0.394114 |
a0ed35cd2a2fcaf79d84a20f492250006d069eb3 | 3,586 | py | Python | dz_se_comm.py | strebrah/Solaredge_Domoticz_Modbus | 802bfde4f4b458ad0d30d3a9433315e12e3aa837 | [
"MIT"
]
| null | null | null | dz_se_comm.py | strebrah/Solaredge_Domoticz_Modbus | 802bfde4f4b458ad0d30d3a9433315e12e3aa837 | [
"MIT"
]
| null | null | null | dz_se_comm.py | strebrah/Solaredge_Domoticz_Modbus | 802bfde4f4b458ad0d30d3a9433315e12e3aa837 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
####################################################################################################
# Created by EH (NL) https://github.com/strebrah/Solaredge_Domoticz_Modbus #
# Date: August 2020 #
# Version: 0.1 #
# Designed for python 3.7 (based on the requirements of the 'solaredge_modbus' library.) #
# Thanks to Niels for the 'solaredge_modbus' library https://pypi.org/project/solaredge-modbus/ #
# Capabilities: #
# * Creating a hardware device in Domoticz #
# * Creating sensors for the data types in Domoticz #
# * Sending the solaredge modbus data to Domoticz #
# How to use #
# 1. Enter your configuration in the 'dz_se_settings.ini' file #
# 2. configure crontab task for periodic data transfer to Domoticz. #
# example: #
# sudo crontab -e #
# for example, every minute #
# */1 * * * * /usr/bin/python3 /home/pi/domoticz/scripts/python/dz_se_comm.py #
####################################################################################################
import requests
import configparser
import time
import solaredge_modbus
from dz_se_lib import domoticz_create_hardware
from dz_se_lib import domoticz_create_devices
from dz_se_lib import domoticz_retrieve_device_idx
from dz_se_lib import domoticz_transceive_data
from dz_se_lib import get_path_to_init_file
if __name__ == "__main__":
settings = configparser.ConfigParser()
settings._interpolation = configparser.ExtendedInterpolation()
settings.read(get_path_to_init_file())
domoticz_ip = settings.get('GENERAL SETTINGS', 'domoticz_ip')
domoticz_port = settings.get('GENERAL SETTINGS', 'domoticz_port')
inverter = solaredge_modbus.Inverter(host=settings.get('GENERAL SETTINGS', 'solaredge_inverter_ip'),
port=settings.get('GENERAL SETTINGS', 'solaredge_inverter_port'), timeout=1,
unit=1)
# Get values from Solaredge inverter over TCP Modbus
if settings.get('GENERAL SETTINGS', 'domoticz_solaredge_comm_init_done') == '0':
session = requests.Session()
# SET HARDWARE IN DOMOTICZ
DOMOTICZ_HW_IDX = domoticz_create_hardware(domoticz_ip, domoticz_port, settings, session)
# CREATE DEVICES IN DOMOTICZ
domoticz_create_devices(domoticz_ip, domoticz_port, settings, session, DOMOTICZ_HW_IDX)
# GET ALL SENSOR IDX VALUES AND STORE
domoticz_retrieve_device_idx(domoticz_ip, domoticz_port, settings, session)
session.close()
else:
time.sleep(0.5)
session = requests.Session()
domoticz_transceive_data(domoticz_ip, domoticz_port, settings, session, inverter)
session.close()
| 59.766667 | 118 | 0.499721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,202 | 0.614055 |
a0edb39559fc23e931152b94ffea25ac01150fa0 | 10,632 | py | Python | parse_mitchell.py | cfwelch/targeted_sentiment | 1c1b063339cdead8f5860df784a0fa170bcdd3ef | [
"MIT"
]
| 1 | 2020-12-28T13:51:02.000Z | 2020-12-28T13:51:02.000Z | parse_mitchell.py | cfwelch/targeted_sentiment | 1c1b063339cdead8f5860df784a0fa170bcdd3ef | [
"MIT"
]
| 2 | 2018-04-23T02:13:44.000Z | 2018-04-25T04:58:35.000Z | parse_mitchell.py | cfwelch/targeted_sentiment | 1c1b063339cdead8f5860df784a0fa170bcdd3ef | [
"MIT"
]
| null | null | null |
import senti_lexis
import datetime, string, numpy, spwrap, random, time, sys, re
from sklearn import svm
from sklearn import cross_validation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import KFold
from scipy.sparse import csr_matrix
def main():
for i in range(1, 11):
runfold(i)
def runfold(fold):
# read files
found_ents = open("fold" + str(fold) + "_found_entities")
train_file = open("../../data/Open Domain Targeted Sentiment/en/10-fold/train." + str(fold))
test_file = open("../../data/Open Domain Targeted Sentiment/en/10-fold/test." + str(fold))
train_lines = train_file.readlines()
test_lines = test_file.readlines()
found_lines = found_ents.readlines()
train_file.close()
test_file.close()
found_ents.close()
# evaluation metrics
ent_guessed = 0
ent_actual = 0
ent_correct = 0
# compute
utterances = list()
tlist = list()
targets = list()
sents = list()
ents = list()
for line in train_lines:
tl = line.strip()
if tl == "":
if tlist:
utterances.append(tlist)
tlist = list()
else:
if not tl.startswith("## Tweet"):
tlist.append(tl.split("\t"))
if tlist:
utterances.append(tlist)
tlist = list()
splitpoint = len(utterances)
for line in test_lines:
tl = line.strip()
if tl == "":
if tlist:
utterances.append(tlist)
tlist = list()
else:
if not tl.startswith("## Tweet"):
tlist.append(tl.split("\t"))
if tlist:
utterances.append(tlist)
#cprint("Utterances: " + str(len(utterances)))
######## figure out what to do with the predicted entities
print("LEN OF FLINES: " + str(len(found_lines)))
print("LEN OF UTTERANCES: " + str(len(utterances) - splitpoint))
found_entities = list()
for f_line in found_lines:
PTZ = f_line.strip();#.split("::::::")
if PTZ != "[]":#PTZ[0]
partf = PTZ[2:-2].split("', '");#PTZ[0]
found_entities.append(partf)
else:
found_entities.append([])
print("LEN OF PARSED FLINES: " + str(len(found_entities)))
#split tlist into targets and sents
splitpoint2 = 0
count = 0
for utt in utterances:
sent = ""
target = list()
ent = list()
f_ents = None
if count >= splitpoint:
f_ents = found_entities[count - splitpoint]
_t = None
for i in utt:
if sent != "":
sent += " "
sent += i[0]
if i[1] == "B-ORGANIZATION" or i[1] == "B-PERSON":
#if i[1][0] == "B":
target.append(i[0])
ent.append(i[2])
#print(str(i[0]) + ":" + str(i[1][0]) + ":" + str(i[2]))
## Figure out if entity was missed or not
#### debug if block
#if f_ents != None:
# print(ent)
# print(target)
# print(f_ents)
# if count == 2117:
# break
if f_ents != None:
#ent_guessed += len(f_ents);#overcounts by including _
#ent_actual += len(target);#overcounts by including _
for T_T in range(0, len(target)):
if ent[T_T] != "_" and ent[T_T] != "neutral":
ent_actual += 1
TOTEST_targets = list()
TOTEST_ents = list()
for T_T in f_ents:
if T_T in target:
idx = target.index(T_T)
TOTEST_targets.append(T_T)
TOTEST_ents.append(ent[idx])
if ent[idx] != "_" and ent[idx] != "neutral":
ent_guessed += 1
else:
ent_guessed += 1
#print(TOTEST_targets)
#print(TOTEST_ents)
ents.extend(TOTEST_ents)
for a in range(0, len(TOTEST_ents)):
sents.append(sent)
targets.extend(TOTEST_targets)
else:
ents.extend(ent)
for a in range(0, len(ent)):
sents.append(sent)
targets.extend(target)
######## split point counters
if count > splitpoint and splitpoint2 == 0:
splitpoint2 = len(targets)
count += 1
#return;########################################################################
#print("SPLIT POINT 1: " + str(splitpoint))
#print("SPLIT POINT 2: " + str(splitpoint2))
print("LEN TARGETS: " + str(len(targets)));#ntities
print("LEN SENTS: " + str(len(sents)));#sentences
print("LEN ENTS: " + str(len(ents)));#sentiments
print("LEN REAL TARGETS: " + str(len(found_entities)))#real entities
# Generate vocab
cv = set()
regex = re.compile(r"[^a-zA-Z0-9_\~\- ]+")
for sent in range(0, len(sents)):
parts = sents[sent].split(" ")
for part in range(0, len(parts)):
thepart = regex.sub("", parts[part])
# corner case for hyphens
hps = thepart.split("-")
if len(hps) > 1:
for hi in range(0, len(hps)):
cv.add(hps[hi].lower())
# end corner case for hyphens
thepart = thepart.lower()
cv.add(thepart)
for sent in range(0, len(sents)):
tokenSent = sents[sent]
tokenSent = tokenSent.replace(targets[sent], " ~~t~~ " + targets[sent])
parts = regex.sub("", tokenSent)
parts = re.split(" |-", parts)
while "" in parts:
parts.remove("")
windowFeatures = []
done = False
while not done:
for part in range(0, len(parts)):
if "~~t~~" == parts[part]:
windowFeatures += [part]
parts.remove(parts[part])
#print("parts?: " + str(parts))
break
if part == len(parts) - 1:
done = True
for part in range(0, len(parts)):
thepart = parts[part].lower()
if thepart not in cv:
cv.add(thepart)
cv = list(cv)
#cprint("Vocabulary Size: " + str(len(cv)))
# Generate the feature vectors
xtc = []
xtcT = []
train_ents = []
test_ents = []
for sent in range(0, len(sents)):
# add token boundaries to the sentence
tokenSent = sents[sent]
#print(targets[sent])
tokenSent = tokenSent.replace(targets[sent], " ~~t~~ " + targets[sent])
#print(tokenSent)
parts = regex.sub("", tokenSent)
# this handles split and hyphen corner case
parts = re.split(" |-", parts)
# remove empty parts from the sentence
while "" in parts:
parts.remove("")
# locate window feature indicies
windowFeatures = []
done = False
while not done:
for part in range(0, len(parts)):
if "~~t~~" == parts[part]:
windowFeatures += [part]
parts.remove(parts[part])
#print("parts?: " + str(parts))
break
if part == len(parts) - 1:
done = True
#print("window features: " + str(windowFeatures))
#print("parts: " + str(parts))
row = []
featureMap = {}
Nflag = 0
for part in range(0, len(parts)):
#thepart = regex.sub("", parts[part])
#thepart = thepart.lower()
thepart = parts[part].lower()
if thepart not in cv:
cv.append(thepart)
theid = cv.index(thepart)
#print(theid)
mindist = 999
for wf in range(0, len(windowFeatures)):
##############################################################
## This is the distance measure for window linear distance!
distance = abs(windowFeatures[wf] - part)
##############################################################
## This is the distance measure for dependency tree distance!
## distance = spwrap.treeDistance(parts[windowFeatures[wf]], parts[part], dparse)
##############################################################
if distance < mindist:
mindist = distance
mindist += 1
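# Worked example of the weighting applied below (values follow directly from
# the code, nothing external assumed): a token adjacent to a target mention
# has linear distance 1, so mindist becomes 2 after the +1 above and its
# count weight is 2.0 - 2/7 ~= 1.71; at distance 13 the weight reaches 0, and
# tokens farther away contribute a negative weight.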
sentiz = senti_lexis.lexCounts(thepart)
if theid in featureMap:
# featureMap[theid] += 1.0 / mindist
featureMap[theid][0] += 2.0 - mindist / 7.0
featureMap[theid][1] += (2.0 - mindist / 7.0) * sentiz[0]
featureMap[theid][2] += (2.0 - mindist / 7.0) * sentiz[1]
featureMap[theid][3] += (2.0 - mindist / 7.0) * sentiz[2]
if Nflag > 0:
featureMap[theid][4] = 1.0
else:
# featureMap[theid] = 1.0 / mindist
# count, positive, negative, neutral, negate
featureMap[theid] = [0, 0, 0, 0, 0]
featureMap[theid][0] = 2.0 - mindist / 7.0
featureMap[theid][1] = (2.0 - mindist / 7.0) * sentiz[0]
featureMap[theid][2] = (2.0 - mindist / 7.0) * sentiz[1]
featureMap[theid][3] = (2.0 - mindist / 7.0) * sentiz[2]
if Nflag > 0:
featureMap[theid][4] = 1.0
if Nflag > 0:
Nflag -= 1
if senti_lexis.lexNegate(thepart):
Nflag = 2
for i in range(0, len(cv)):
if i in featureMap:
row.extend(featureMap[i])
else:
row.extend([0, 0, 0, 0, 0])
if sent < splitpoint2:
#print("ROW: " + str(len(row)))
#print("LABEL: " + str(ents[sent]))
xtc.append(row)
train_ents.append(ents[sent])
else:
xtcT.append(row)
test_ents.append(ents[sent])
dist = numpy.array(ents)
#print((dist=="neutral").sum())
#print((dist=="negative").sum())
#print((dist=="positive").sum())
#print((dist=="_").sum())
#print("LENTR: " + str(len(xtc)))
#print("LENTE: " + str(len(xtcT)))
print("LEN TRAIN ENTS: " + str(len(train_ents)))
print("LEN TEST ENTS: " + str(len(test_ents)))
#do gridsearch + evaluation
bestC = 0
bestGamma = 0
bestScore = 0
xtest = list()
xtrain = list()
ytest = list()
ytrain = list()
# do train-test split
for j in range(0, len(xtc)):
LB = train_ents[j]
if LB != "_" and LB != "neutral":
xtrain.append(xtc[j])
ytrain.append(LB)
for j in range(0, len(xtcT)):
LB = test_ents[j]
if LB != "_" and LB != "neutral":
xtest.append(xtcT[j])
ytest.append(LB)
score = 0
print("LEN TRAIN: " + str(len(ytrain)))
print("LEN TEST: " + str(len(ytest)))
#print(xtrain)
#print(len(xtrain))
#print(len(xtrain[0]))
#print(len(xtrain[1]))
#print(len(xtrain[2]))
#print(len(xtrain[3]))
#print(len(xtrain[4]))
#for __ in xtrain:
#if len(__) != 56410:
# print len(__)
for gamma in numpy.linspace(0.0001, 0.05, 10):#10steps
for C in numpy.linspace(0.1, 10, 10):#10steps
#gamma = 0.005644444444444444
#C = 6.0
xtrain = csr_matrix(xtrain)
xtest = csr_matrix(xtest)
clf = svm.SVC(gamma=gamma, C=C)
testout = clf.fit(xtrain, ytrain)
score = clf.score(xtest, ytest)
if score > bestScore:
bestC = C
bestGamma = gamma
bestScore = score
cprint("Cross Validation Score: " + str(score))
cprint("Gamma = " + str(gamma) + " and C = " + str(C))
################ THIS IS FOR NORMAL EVALUATION ################
xtrain = csr_matrix(xtrain)
xtest = csr_matrix(xtest)
clf = svm.SVC(gamma=bestGamma, C=bestC)
testout = clf.fit(xtrain, ytrain)
bestScore = clf.score(xtest, ytest)
#print(clf.predict(xtest))
ent_correct = (clf.predict(xtest) == ytest).sum()
cprint("Actual Score: " + str(bestScore))
###############################################################
print(str(ent_guessed) + "\t" + str(ent_actual) + "\t" + str(ent_correct))
#print("ENT GUESSED: " + str(ent_guessed))
#print("ENT ACTUAL: " + str(ent_actual))
#print("ENT CORRECT: " + str(ent_correct))
def cprint(msg):
tmsg = msg
st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
tmsg = str(st) + ": " + str(tmsg)
print(tmsg)
log_file = open("open_domain.log", "a")
log_file.write(tmsg + "\n")
log_file.flush()
log_file.close()
if __name__ == "__main__":
main()
| 28.891304 | 93 | 0.60506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,360 | 0.316027 |
a0ee65cec9b822e4705a0e2c457a3bbab820bf6b | 1,314 | py | Python | cryptographyMachine/cryptographyMachine.py | anuranjan08/CryptoMachine | 5a1d68adbe88708f21902d1d44a636c043f6ed28 | [
"MIT"
]
| null | null | null | cryptographyMachine/cryptographyMachine.py | anuranjan08/CryptoMachine | 5a1d68adbe88708f21902d1d44a636c043f6ed28 | [
"MIT"
]
| null | null | null | cryptographyMachine/cryptographyMachine.py | anuranjan08/CryptoMachine | 5a1d68adbe88708f21902d1d44a636c043f6ed28 | [
"MIT"
]
| null | null | null | def machine():
keys='abcdefghijklmnopqrstuvwxyz !'
values=keys[-1]+keys[0:-1]
"""
In encrytpDict: In decryptDict:
keys Values keys Values
'a' '!' '!' 'a'
'b' 'a' 'a' 'b'
. . . .
. . . .
. . . .
"""
encryptDict=dict(zip(keys,values))
decryptDict=dict(zip(values,keys))
"""
Asking user for the user input and the mode.
"""
message=input("Please enter your secret message")
mode=input("Please enter your mode: Encode(E) or Decode(D)")
"""
if the mode is encryption(E)/decryption(D):
We will create a listin which we run a dictionary comprehension and
if that particular letter is there in encrytion/decryption dictionary , we will
fetch the value of that letter and we will append that to list.Similary
for other letters in the message.
"""
if mode.upper()=='E':
newMessage=''.join([encryptDict[letter] for letter in message.lower()])
elif mode.upper()=='D':
newMessage=''.join([decryptDict[letter] for letter in message.lower()])
    else:
        print("Please enter a correct choice")
        newMessage = ''  # keep the return value defined for invalid modes
    return newMessage
print(machine())
| 27.375 | 89 | 0.547945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 843 | 0.641553 |
a0ee8d887762a2061e866ff6d3e72e86639288e1 | 645 | py | Python | tests/test_ioeeg_abf.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
]
| 63 | 2017-12-30T08:11:17.000Z | 2022-01-28T10:34:20.000Z | tests/test_ioeeg_abf.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
]
| 23 | 2017-09-08T08:29:49.000Z | 2022-03-17T08:19:13.000Z | tests/test_ioeeg_abf.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
]
| 12 | 2017-09-18T12:48:36.000Z | 2021-09-22T07:16:07.000Z | from numpy import isnan
from wonambi import Dataset
from .paths import axon_abf_file
d = Dataset(axon_abf_file)
def test_abf_read():
assert len(d.header['chan_name']) == 1
assert d.header['start_time'].minute == 47
data = d.read_data(begtime=1, endtime=2)
assert data.data[0][0, 0] == 2.1972655922581912
markers = d.read_markers()
assert len(markers) == 0
def test_abf_boundary():
data = d.read_data(begsam=-10, endsam=5)
assert isnan(data.data[0][0, :10]).all()
n_smp = d.header['n_samples']
data = d.read_data(begsam=n_smp - 2, endsam=n_smp + 10)
assert isnan(data.data[0][0, 2:]).all()
| 21.5 | 59 | 0.662016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.052713 |
a0f1fbf8cfec77c2b1ef56f17fd04592b977c305 | 9,115 | py | Python | tests/Preprocessing_Test.py | Maxence-Labesse/MLKit | 7f8d92b5d3e025dc3719c3bbaf1f2e55afda5107 | [
"MIT"
]
| 1 | 2022-01-11T14:13:22.000Z | 2022-01-11T14:13:22.000Z | tests/Preprocessing_Test.py | Maxence-Labesse/MLKit | 7f8d92b5d3e025dc3719c3bbaf1f2e55afda5107 | [
"MIT"
]
| null | null | null | tests/Preprocessing_Test.py | Maxence-Labesse/MLKit | 7f8d92b5d3e025dc3719c3bbaf1f2e55afda5107 | [
"MIT"
]
| 1 | 2020-07-10T09:51:22.000Z | 2020-07-10T09:51:22.000Z | from AutoMxL.Preprocessing.Categorical import *
from AutoMxL.Preprocessing.Date import *
from AutoMxL.Preprocessing.Outliers import *
from AutoMxL.Preprocessing.Missing_Values import *
import unittest
import pandas as pd
import math
# test config
df = pd.read_csv('tests/df_test_bis.csv')
class TestMissingValues(unittest.TestCase):
"""
Test Missing_Values module
"""
def test_fill_numerical(self):
""" fill_numerical function"""
df_fill_all_num = fill_numerical(df, ['Age', 'Height'], method='zero', track_num_NA=True, verbose=False)
self.assertIn('top_NA_Height', df_fill_all_num.columns.tolist())
self.assertIn('top_NA_Age', df_fill_all_num.columns.tolist())
self.assertEqual(df_fill_all_num.iloc[0]['Height'], 0)
self.assertEqual(df_fill_all_num.iloc[1]['Age'], 0)
def test_fill_categorical(self):
""" fill_categorical function"""
df_fill_all_cat = fill_categorical(df, l_var=['Name', 'Sexe'], method='NR', verbose=False)
self.assertEqual(df_fill_all_cat.iloc[3]['Name'], 'NR')
self.assertEqual(df_fill_all_cat.iloc[3]['Sexe'], 'NR')
def test_NAEncoder(self):
""" NAEncoder class"""
NA_encoder1 = NAEncoder(replace_num_with='median', replace_cat_with='NR', track_num_NA=True)
NA_encoder1.fit(df, l_var=['Name', 'Age'])
df_NA1 = NA_encoder1.transform(df)
#
NA_encoder2 = NAEncoder(replace_num_with='zero', replace_cat_with='NR', track_num_NA=False)
df_NA2 = NA_encoder2.fit_transform(df)
# created features
self.assertIn("top_NA_Age", df_NA1.columns.tolist())
self.assertNotIn('top_NA_Height', df_NA1.columns.tolist())
self.assertNotIn("top_NA_Age", df_NA2.columns.tolist())
# raw features contain NA
self.assertEqual(get_NA_features(df),
['Name', 'Id_cat', 'Id_num', 'Verb', 'Age', 'Height', 'Sexe', 'Date_nai', 'American_date_nai'])
# filled features
self.assertEqual(get_NA_features(df_NA1),
['Id_cat', 'Id_num', 'Verb', 'Height', 'Sexe', 'Date_nai', 'American_date_nai'])
self.assertEqual(get_NA_features(df_NA2), [])
# modified values
self.assertTrue(math.isnan(df['Name'][3]))
self.assertTrue(math.isnan(df['Age'][1]))
self.assertEqual(df_NA1['Name'][3], 'NR')
self.assertEqual(df_NA1['Age'][1], 25.5)
self.assertEqual(df_NA2['Name'][3], 'NR')
self.assertEqual(df_NA2['Age'][1], 0)
"""
------------------------------------------------------------------------------------------------
"""
df_to_date = all_to_date(df, ['Date_nai', 'American_date_nai'], verbose=False)
df_to_anc, new_var_list = date_to_anc(df_to_date, l_var=['American_date_nai', 'Date_nai'], date_ref='27/10/2010')
class TestDate(unittest.TestCase):
"""
Test Date Module
"""
def test_all_to_date(self):
""" all_to_date function """
self.assertEqual(np.dtype(df_to_date['American_date_nai']), 'datetime64[ns]')
self.assertEqual(np.dtype(df_to_date['Date_nai']), 'datetime64[ns]')
self.assertEqual(np.dtype(df_to_date['American_date_nai']), 'datetime64[ns]')
def test_date_to_anc(self):
""" date_to_anc function"""
self.assertIn('anc_American_date_nai', df_to_anc.columns)
self.assertIn('anc_Date_nai', df_to_anc.columns)
self.assertNotIn('Date_nai', df_to_anc.columns)
self.assertNotIn('American_Date_nai', df_to_anc.columns)
self.assertEqual(df_to_anc['anc_Date_nai'][0], 0.0)
self.assertIn('anc_American_date_nai', new_var_list)
def test_DateEncoder(self):
""" DateEncoder class"""
Date_encoder1 = DateEncoder(method='timedelta', date_ref='27/10/2010')
Date_encoder1.fit(df, l_var=['American_date_nai', 'Age'])
df_date1 = Date_encoder1.transform(df)
#
date_encoder2 = DateEncoder(method='timedelta', date_ref='27/10/2011')
df_date2 = date_encoder2.fit_transform(df)
# created/removed features
self.assertIn('anc_American_date_nai', df_date1.columns.tolist())
self.assertIn('Date_nai', df_date1.columns.tolist())
self.assertNotIn('anc_Date_nai', df_date1.columns.tolist())
self.assertIn('Age', df_date1.columns.tolist())
self.assertNotIn('American_date_nai', df_date2.columns.tolist())
self.assertNotIn('Date_nai', df_date2.columns.tolist())
# features formats
self.assertEqual(df_date1['anc_American_date_nai'].dtype, 'float64')
self.assertEqual(df_date2['anc_Date_nai'].dtype, 'float64')
# features values
self.assertEqual(df_date1['anc_American_date_nai'][0], 0.0)
self.assertEqual(df_date2['anc_Date_nai'][0], 1.0)
self.assertEqual(df_date2['anc_American_date_nai'][0], 1.0)
"""
------------------------------------------------------------------------------------------------
"""
class TestCategorical(unittest.TestCase):
"""
Test Categorical module
"""
def test_dummy_all_var(self):
""" dummy_all_var func """
df_dummy = dummy_all_var(df, var_list=['Eyes', 'Sexe'], prefix_list=None, keep=False, verbose=False)
df_dummy_pref = dummy_all_var(df, var_list=['Eyes', 'Sexe'], prefix_list=['Ey', 'Sx'], keep=True,
verbose=False)
# created/removed features
self.assertIn('Eyes_blue', df_dummy.columns)
self.assertIn('Eyes_red', df_dummy.columns)
self.assertNotIn('Eyes', df_dummy.columns)
self.assertNotIn('Sexe', df_dummy.columns)
self.assertIn('Ey_blue', df_dummy_pref.columns)
self.assertIn('Sx_M', df_dummy_pref.columns)
self.assertIn('Eyes', df_dummy_pref.columns)
self.assertIn('Sexe', df_dummy_pref.columns)
# features values
self.assertEqual(df_dummy['Eyes_blue'].tolist(), [1, 0, 0, 1, 0, 1])
self.assertEqual(df_dummy['Sexe_M'].tolist(), [1, 1, 1, 0, 0, 1])
def test_CategoricalEncoder(self):
""" CategoricalEncoder """
df_pred = pd.read_csv('tests/df_test.Csv')
df_pred['job'] = df_pred['job'].fillna('NR')
cat_encoder1 = CategoricalEncoder(method='deep_encoder')
df_cat1 = cat_encoder1.fit_transform(df_pred, target='y_yes', l_var=['job', 'education'], verbose=False)
print('\n\n')
#
cat_encoder2 = CategoricalEncoder(method='one_hot')
cat_encoder2.fit(df, l_var=['Name', 'Eyes'], verbose=False)
print('\n\n')
df_cat2 = cat_encoder2.transform(df)
# features created/removed
self.assertIn('job_0', df_cat1.columns.tolist())
self.assertIn('education_0', df_cat1.columns.tolist())
self.assertNotIn('job', df_cat1.columns.tolist())
self.assertIn('Eyes_blue', df_cat2.columns.tolist())
self.assertNotIn('Eyes', df_cat2.columns.tolist())
# features embedding
self.assertEqual(list(cat_encoder1.d_embeddings.keys()), ['job', 'education'])
# features values
self.assertEqual(df_cat2['Eyes_green'].tolist(), [0, 0, 0, 0, 1, 0])
"""
------------------------------------------------------------------------------------------------
"""
class TestOutliers(unittest.TestCase):
def test_replace_category(self):
""" replace_category function """
df_process_cat = replace_category(df, 'Hair', ['blond'], verbose=False)
df_process_cat = replace_category(df_process_cat, 'Name', ['Tom', 'Nick'], verbose=False)
# features values
self.assertEqual(df_process_cat['Name'].tolist(),
['outliers', 'outliers', 'Krish', np.nan, 'John', 'Jack'])
self.assertEqual(df_process_cat['Hair'].tolist(),
['brown', 'brown', 'dark', 'outliers', 'outliers', 'outliers'])
def test_extrem_values(self):
""" extreme_values function """
df_outlier_proc = replace_extreme_values(df, 'Height', 175, 185)
self.assertEqual(df_outlier_proc['Height'].tolist()[1:], [175.0, 180.0, 185.0, 185.0, 185.0])
def test_OutlierEncode(self):
""" OutlierEncodeR class """
out_encoder1 = OutliersEncoder(cat_threshold=0.25, num_xstd=1)
out_encoder1.fit(df, l_var=['Height', 'Sexe', 'Hair', 'Age'], verbose=False)
df_out1 = out_encoder1.transform(df, verbose=False)
out_encoder2 = OutliersEncoder(cat_threshold=0.2, num_xstd=1)
df_out2 = out_encoder2.fit_transform(df, verbose=False)
# cat outliers
self.assertEqual(list(df_out1['Hair']), ['brown', 'brown', 'dark', 'blond', 'blond', 'blond'])
self.assertEqual(list(df_out1['Sexe']), ['M', 'M', 'M', np.nan, 'F', 'M'])
self.assertEqual(list(df_out2['Name']), ['outliers'] * 6)
# num outliers
self.assertEqual(list(df_out1['Height'].round(4))[1:], [175.2177, 180.0, 188.7823, 185.0, 185.0])
self.assertEqual(list(df_out2['Unnamed: 0'].round(4)), [0.7922, 1.0, 2.0, 3.0, 4.0, 4.2078])
print('out2')
| 43.822115 | 120 | 0.622929 | 8,298 | 0.910368 | 0 | 0 | 0 | 0 | 0 | 0 | 2,510 | 0.27537 |
a0f259a7948c591dd236fbcc2a29325e01018267 | 218 | py | Python | PythonTutor/session-4/conditionIfelse.py | krishnamanchikalapudi/examples.py | 7a373d24df06b8882d07b850435b268a24317b1e | [
"MIT"
]
| null | null | null | PythonTutor/session-4/conditionIfelse.py | krishnamanchikalapudi/examples.py | 7a373d24df06b8882d07b850435b268a24317b1e | [
"MIT"
]
| 1 | 2020-02-14T13:24:01.000Z | 2020-02-14T13:24:01.000Z | PythonTutor/session-4/conditionIfelse.py | krishnamanchikalapudi/examples.py | 7a373d24df06b8882d07b850435b268a24317b1e | [
"MIT"
]
| 2 | 2020-02-14T13:21:20.000Z | 2021-06-30T00:50:33.000Z | """
Session: 4
Topic: Conditional: IF ELSE statement
"""
x = 20
y = 100
if (x > y):
print ('x > y is true')
print ('new line 1')
else:
print('x > y is false')
print('new line 2')
print ('new line 3')
| 13.625 | 37 | 0.550459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.56422 |
a0f3c7164fd5d0e03360ed4d29df99912a368e12 | 915 | py | Python | day02/day02.py | pogross/adventofcode2021 | 33fc177d30e1104a6203e435f83594c4d3774cdb | [
"MIT"
]
| null | null | null | day02/day02.py | pogross/adventofcode2021 | 33fc177d30e1104a6203e435f83594c4d3774cdb | [
"MIT"
]
| null | null | null | day02/day02.py | pogross/adventofcode2021 | 33fc177d30e1104a6203e435f83594c4d3774cdb | [
"MIT"
]
| null | null | null | def execute_command(command: str) -> (int):
direction, magnitude = command.split(" ")
horizontal, depth = 0, 0
if direction == "forward":
horizontal += int(magnitude)
elif direction == "up":
depth -= int(magnitude)
elif direction == "down":
depth += int(magnitude)
return horizontal, depth
def chain_commands(commands: list[str]) -> tuple[int, int]:
horizontal, depth = 0, 0
for command in commands:
horizontal_change, depth_change = execute_command(command)
horizontal += horizontal_change
depth += depth_change
return horizontal, depth
if __name__ == "__main__":
with open("input.txt") as f:
raw = f.read()
commands = [x for x in raw.split("\n")]
horizontal, depth = chain_commands(commands)
print(f"First answer is {horizontal*depth}")
# print(f"Second answer is {count_increasing(measurements, 3)}")
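# Illustrative trace of the helpers above (made-up commands, not puzzle
# input): chain_commands(["forward 5", "down 5", "forward 8", "up 3"])
# accumulates horizontal = 5 + 8 = 13 and depth = 5 - 3 = 2, so the printed
# product would be 26.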
| 26.911765 | 68 | 0.636066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.161749 |
a0f92a83ae88dda1724d8249cb3715aea8d6c4ad | 2,073 | py | Python | execute.py | r-kapoor/ranking-extractions | 59ed7f23d120d1bc7f0ee2af48ffa61817fd1715 | [
"MIT"
]
| null | null | null | execute.py | r-kapoor/ranking-extractions | 59ed7f23d120d1bc7f0ee2af48ffa61817fd1715 | [
"MIT"
]
| null | null | null | execute.py | r-kapoor/ranking-extractions | 59ed7f23d120d1bc7f0ee2af48ffa61817fd1715 | [
"MIT"
]
| null | null | null | import codecs
import json
import rank
import train_ranker
#Files to be present in home dir
TRAINING_FILE_CITIES = 'manual_7_cities.jl'
TRAINING_FILE_NAMES = 'manual_50_names.jl'
TRAINING_FILE_ETHNICITIES = 'manual_50_ethnicities.jl'
ACTUAL_FILE_CITIES = 'manual_50_cities.jl'
ACTUAL_FILE_NAMES = 'manual_50_names.jl'
ACTUAL_FILE_ETHNICITIES = 'manual_50_ethnicities.jl'
EMBEDDINGS_FILE = 'unigram-part-00000-v2.json'
FIELD_NAMES_CITIES = {
"text_field": "readability_text",
"annotated_field":"annotated_cities",
"correct_field":"correct_cities"
}
FIELD_NAMES_NAMES = {
"text_field": "readability_text",
"annotated_field":"annotated_names",
"correct_field":"correct_names"
}
FIELD_NAMES_ETHNICITIES = {
"text_field": "readability_text",
"annotated_field":"annotated_ethnicities",
"correct_field":"correct_ethnicities"
}
def read_embedding_file(embeddings_file):
unigram_embeddings = dict()
with codecs.open(embeddings_file, 'r', 'utf-8') as f:
for line in f:
obj = json.loads(line)
for k, v in obj.items():
unigram_embeddings[k] = v
return unigram_embeddings
def get_texts(json_object):
"""
Parsing logic for getting texts
"""
texts = list()
texts.append(json_object.get(FIELD_NAMES_CITIES['text_field']))
return texts
def get_annotated_list(json_object):
"""
Parsing logic for getting annotated field
"""
return json_object.get(FIELD_NAMES_CITIES['annotated_field'])
embeddings_dict = read_embedding_file(EMBEDDINGS_FILE)
classifier = train_ranker.train_ranker(embeddings_dict, TRAINING_FILE_CITIES, FIELD_NAMES_CITIES)
with codecs.open(ACTUAL_FILE_CITIES, 'r', 'utf-8') as f:
for line in f:
obj = json.loads(line)
list_of_texts = get_texts(obj)
annotated_list = get_annotated_list(obj)
print "Annotated tokens:",
print annotated_list
ranked_list = rank.rank(embeddings_dict, list_of_texts, annotated_list, classifier)
print "Ranked List:",
print ranked_list
| 29.614286 | 97 | 0.721177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.325615 |
a0f9341f558e2700ed30e7586738a7942212308d | 336 | py | Python | Python-codes-CeV/32-Leap_year.py | engcristian/Python | 726a53e9499fd5d0594572298e59e318f98e2d36 | [
"MIT"
]
| 1 | 2021-02-22T03:53:23.000Z | 2021-02-22T03:53:23.000Z | Python-codes-CeV/32-Leap_year.py | engcristian/Python | 726a53e9499fd5d0594572298e59e318f98e2d36 | [
"MIT"
]
| null | null | null | Python-codes-CeV/32-Leap_year.py | engcristian/Python | 726a53e9499fd5d0594572298e59e318f98e2d36 | [
"MIT"
]
| null | null | null | ''' Calculat the leap year'''
from datetime import date
year = int(input('What year do you want to analyse? Type 0 for the current year.'))
if year == 0:
year = date.today().year
if year%4 ==0 and year%100 != 0 or year%400 == 0:
    print(f"The year {year} is a LEAP year.")
else:
print(F"The year {year} isn't a LEAP year.") | 37.333333 | 84 | 0.645833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.497024 |
a0f9bbfc405c03e8dff904c969ce60482f1a635c | 567 | py | Python | thesis/code/fairness/gen.py | fz1989/master-thesis | e47af8c90d8d18d87f906a7a4bcadb64669e70db | [
"MIT"
]
| null | null | null | thesis/code/fairness/gen.py | fz1989/master-thesis | e47af8c90d8d18d87f906a7a4bcadb64669e70db | [
"MIT"
]
| null | null | null | thesis/code/fairness/gen.py | fz1989/master-thesis | e47af8c90d8d18d87f906a7a4bcadb64669e70db | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
#coding=utf-8
import numpy
class Task(object):
def __init__(self, cpu, mem):
self.cpu = cpu
self.mem = mem
def __repr__(self):
return ("%d,%d") % (self.cpu, self.mem)
def get_task_list():
task_list = []
for i in range(0,30):
cpu = numpy.random.randint(1,8)
mem = numpy.random.randint(1,8)
task_list.append(Task(cpu, mem))
return task_list
if __name__ == "__main__":
task_list = get_task_list()
for task in task_list:
print "%d\t%d" % (task.cpu, task.mem)
| 21 | 47 | 0.589065 | 173 | 0.305115 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.104056 |
a0fa30f527e6c86b6cb9dc5b7f38c0821721deb9 | 71 | py | Python | tests/routes/__init__.py | Bachhofer/spottydata | e9334c2a32bb65018b57d83fc4522ae241427db7 | [
"MIT"
]
| null | null | null | tests/routes/__init__.py | Bachhofer/spottydata | e9334c2a32bb65018b57d83fc4522ae241427db7 | [
"MIT"
]
| null | null | null | tests/routes/__init__.py | Bachhofer/spottydata | e9334c2a32bb65018b57d83fc4522ae241427db7 | [
"MIT"
]
| null | null | null | # This is an empty python file to expose this directory to it's parent
| 35.5 | 70 | 0.774648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.985915 |
a0fccc7e51abcecde4662d4c35aa618544e6087c | 7,500 | py | Python | Perceptual Hash -Asher/ex1/example_solution.py | kidist-amde/image-search-engine | 467d022f7248a74822dd9ae938b5b86333ce417a | [
"MIT"
]
| null | null | null | Perceptual Hash -Asher/ex1/example_solution.py | kidist-amde/image-search-engine | 467d022f7248a74822dd9ae938b5b86333ce417a | [
"MIT"
]
| null | null | null | Perceptual Hash -Asher/ex1/example_solution.py | kidist-amde/image-search-engine | 467d022f7248a74822dd9ae938b5b86333ce417a | [
"MIT"
]
| null | null | null | import os
import cv2
from sklearn.cluster import KMeans, DBSCAN, MiniBatchKMeans
from scipy import spatial
from sklearn.preprocessing import StandardScaler
import numpy as np
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser(description='Challenge presentation example')
parser.add_argument('--data_path',
'-d',
type=str,
default='dataset',
help='Dataset path')
parser.add_argument('--output_dim',
'-o',
type=int,
default=20,
help='Descriptor length')
parser.add_argument('--save_dir',
'-s',
type=str,
default=None,
help='Save or not gallery/query feats')
parser.add_argument('--random',
'-r',
action='store_true',
help='Random run')
args = parser.parse_args()
class Dataset(object):
def __init__(self, data_path):
self.data_path = data_path
assert os.path.exists(self.data_path), 'Insert a valid path!'
self.data_classes = os.listdir(self.data_path)
self.data_mapping = {}
for c, c_name in enumerate(self.data_classes):
temp_path = os.path.join(self.data_path, c_name)
temp_images = os.listdir(temp_path)
for i in temp_images:
img_tmp = os.path.join(temp_path, i)
if img_tmp.endswith('.jpg'):
if c_name == 'distractor':
self.data_mapping[img_tmp] = -1
else:
self.data_mapping[img_tmp] = int(c_name)
print('Loaded {:d} from {:s} images'.format(len(self.data_mapping.keys()),
self.data_path))
def get_data_paths(self):
images = []
classes = []
for img_path in self.data_mapping.keys():
if img_path.endswith('.jpg'):
images.append(img_path)
classes.append(self.data_mapping[img_path])
return images, np.array(classes)
def num_classes(self):
return len(self.data_classes)
class FeatureExtractor(object):
    def __init__(self, feature_extractor, model, out_dim=20, scale=None,
                 subsample=100, gray=True):
        self.feature_extractor = feature_extractor
        self.model = model
        self.out_dim = out_dim
        self.scale = scale
        self.subsample = subsample
        # SIFT descriptors are computed on grayscale images when gray is True
        # (get_descriptor() reads this flag)
        self.gray = gray
def get_descriptor(self, img_path):
img = cv2.imread(img_path)
if self.gray:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kp, descs = self.feature_extractor.detectAndCompute(img, None)
return descs
def fit_model(self, data_list):
training_feats = []
# we extact SIFT descriptors
for img_path in tqdm(data_list, desc='Fit extraction'):
descs = self.get_descriptor(img_path)
if descs is None:
continue
if self.subsample:
# TODO: change here
sub_idx = np.random.choice(np.arange(descs.shape[0]), self.subsample)
descs = descs[sub_idx, :]
training_feats.append(descs)
training_feats = np.concatenate(training_feats)
print('--> Model trained on {} features'.format(training_feats.shape))
# we fit the model
self.model.fit(training_feats)
print('--> Model fitted')
def fit_scaler(self, data_list):
features = self.extract_features(data_list)
print('--> Scale trained on {}'.format(features.shape))
self.scale.fit(features)
print('--> Scale fitted')
def extract_features(self, data_list):
# we init features
features = np.zeros((len(data_list), self.model.n_clusters))
for i, img_path in enumerate(tqdm(data_list, desc='Extraction')):
# get descriptor
descs = self.get_descriptor(img_path)
# 2220x128 descs
preds = self.model.predict(descs)
histo, _ = np.histogram(preds, bins=np.arange(self.model.n_clusters+1), density=True)
# append histogram
features[i, :] = histo
return features
def scale_features(self, features):
# we return the normalized features
return self.scale.transform(features)
def topk_accuracy(gt_label, matched_label, k=1):
matched_label = matched_label[:, :k]
total = matched_label.shape[0]
correct = 0
for q_idx, q_lbl in enumerate(gt_label):
correct+= np.any(q_lbl == matched_label[q_idx, :]).item()
acc_tmp = correct/total
return acc_tmp
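# Small illustration of topk_accuracy (made-up labels, not dataset values):
# with gt_label = [3] and matched_label = np.array([[5, 3, 7]]), k=1 only
# inspects the first column and yields 0.0, while k=3 finds the 3 among the
# top three matches and yields 1.0.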
def main():
data_path = 'C:/Users/21032/Desktop/dataset'
# we define training dataset
training_path = os.path.join(data_path, 'training')
# we define validation dataset
validation_path = os.path.join(data_path, 'validation')
gallery_path = os.path.join(validation_path, 'gallery')
query_path = os.path.join(validation_path, 'query')
training_dataset = Dataset(data_path=training_path)
gallery_dataset = Dataset(data_path=gallery_path)
query_dataset = Dataset(data_path=query_path)
# get training data and classes
training_paths, training_classes = training_dataset.get_data_paths()
# we get validation gallery and query data
gallery_paths, gallery_classes = gallery_dataset.get_data_paths()
query_paths, query_classes = query_dataset.get_data_paths()
if not args.random:
feature_extractor = cv2.SIFT_create()
# we define model for clustering
model = KMeans(n_clusters=args.output_dim, n_init=10, max_iter=5000, verbose=False)
# model = MiniBatchKMeans(n_clusters=args.output_dim, random_state=0, batch_size=100, max_iter=100, verbose=False)
scale = StandardScaler()
# we define the feature extractor providing the model
extractor = FeatureExtractor(feature_extractor=feature_extractor,
model=model,
scale=scale,
out_dim=args.output_dim)
# we fit the KMeans clustering model
extractor.fit_model(training_paths)
extractor.fit_scaler(training_paths)
# now we can use features
# we get query features
query_features = extractor.extract_features(query_paths)
query_features = extractor.scale_features(query_features)
# we get gallery features
gallery_features = extractor.extract_features(gallery_paths)
gallery_features = extractor.scale_features(gallery_features)
print(gallery_features.shape, query_features.shape)
pairwise_dist = spatial.distance.cdist(query_features, gallery_features, 'minkowski', p=2.)
print('--> Computed distances and got c-dist {}'.format(pairwise_dist.shape))
indices = np.argsort(pairwise_dist, axis=-1)
else:
indices = np.random.randint(len(gallery_paths),
size=(len(query_paths), len(gallery_paths)))
gallery_matches = gallery_classes[indices]
print('########## RESULTS ##########')
for k in [1, 3, 10]:
topk_acc = topk_accuracy(query_classes, gallery_matches, k)
print('--> Top-{:d} Accuracy: {:.3f}'.format(k, topk_acc))
if __name__ == '__main__':
main()
| 34.246575 | 122 | 0.608133 | 3,471 | 0.4628 | 0 | 0 | 0 | 0 | 0 | 0 | 1,197 | 0.1596 |
a0fd132d4d35c39d83a7f211d5d4e4443ddf2030 | 1,399 | py | Python | src/modeling/train_test.py | samsonq/Macroeconomic-Default-Analysis | 1a155873f951b1584c33c2d91bd525b67f78136d | [
"MIT"
]
| 4 | 2020-06-12T22:20:48.000Z | 2021-08-08T15:49:38.000Z | src/modeling/train_test.py | samsonq/Macroeconomic-Default-Analysis | 1a155873f951b1584c33c2d91bd525b67f78136d | [
"MIT"
]
| 1 | 2020-04-15T07:11:43.000Z | 2020-04-15T07:11:43.000Z | src/modeling/train_test.py | samsonq/Macroeconomic-Default-Analysis | 1a155873f951b1584c33c2d91bd525b67f78136d | [
"MIT"
]
| 3 | 2020-09-18T02:27:58.000Z | 2021-10-30T21:22:10.000Z | """
Prepare training, validation, and testing data after preprocessing of the large dataset. Used in
training and evaluating models.
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
def feature_selection(data, features):
"""
Choose which features to use for training.
:param data: preprocessed dataset
:param features: list of features to use
:return: data with selected features
"""
return data[features]
def prepare_data(data, label="loan_status", valid_split=0.2, test_split=0.3):
"""
    Splits the data and returns the training, validation, and testing sets.
:param data: preprocessed dataset
:param label: label of data
:param valid_split: percentage to use as validation data
:param test_split: percentage to use as test data
    :returns: X_train, X_valid, X_test, y_train, y_valid, y_test
"""
X_train = data.drop(label, axis=1) # define training features set
y_train = data[label] # define training label set
# use part of the data as testing data
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=test_split, random_state=0)
# use part of the training data as validation data
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=valid_split, random_state=0)
return X_train, X_valid, X_test, y_train, y_valid, y_test
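# Example usage sketch; the CSV file name is an assumption for illustration,
# while "loan_status" is this module's default label column:
#
#   import pandas as pd
#   df = pd.read_csv("preprocessed_loans.csv")
#   X_train, X_valid, X_test, y_train, y_valid, y_test = prepare_data(
#       df, label="loan_status", valid_split=0.2, test_split=0.3)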
| 35.871795 | 114 | 0.735525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 793 | 0.566833 |
a0fd2af6803ffa9be2e8f4bfae48a6a7e68eb4ea | 179,927 | py | Python | cyberradiodriver/CyberRadioDriver/radio.py | CyberRadio/CyberRadioDriver | 44e6fc0e805981981514e6edc18d11d5fa33e659 | [
"MIT"
]
| null | null | null | cyberradiodriver/CyberRadioDriver/radio.py | CyberRadio/CyberRadioDriver | 44e6fc0e805981981514e6edc18d11d5fa33e659 | [
"MIT"
]
| null | null | null | cyberradiodriver/CyberRadioDriver/radio.py | CyberRadio/CyberRadioDriver | 44e6fc0e805981981514e6edc18d11d5fa33e659 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
###############################################################
# \package CyberRadioDriver.radio
#
# \brief Defines basic functionality for radio handler objects.
#
# \note This module defines basic behavior only. To customize
# a radio handler class for a particular radio, derive a new
# class from the appropriate base class. It is recommended
# that behavior specific to a given radio be placed in the
# module that supports that radio.
#
# \author NH
# \author DA
# \author MN
# \copyright Copyright (c) 2014-2021 CyberRadio Solutions, Inc.
# All rights reserved.
#
###############################################################
# Imports from other modules in this package
from . import command
from . import components
from . import configKeys
from . import log
from . import transport
# Imports from external modules
# Python standard library imports
import ast
import copy
import datetime
import json
import math
import sys
import time
import traceback
import threading
##
# \internal
# \brief Returns the MAC address and IP address for a given Ethernet interface.
#
# \author DA
# \param ifname The Ethernet system interface ("eth0", for example).
# \returns A 2-tuple: (MAC Address, IP Address).
def getInterfaceAddresses(ifname):
import socket,fcntl,struct
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
ip = socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
return mac,ip
##
# \internal
# \brief VITA 49 interface specification class.
#
# The _ifSpec class describes how the VITA 49 interface is set up for
# a particular radio. Each radio should have its own interface
# specification, implemented as a subclass of _ifSpec.
#
# Radio handler classes need to set static member "ifSpec" to the interface
# specification class that the radio uses.
class _ifSpec():
## Whether Vita 49.1 is used
vita49_1 = True
## Whether Vita 49.0 is used
vita49_0 = True
    ## Size of the VITA 49 header, in 32-bit words
    headerSizeWords = 0
    ## Size of the payload, in 32-bit words
    payloadSizeWords = 0
    ## Size of the VITA 49 "tail", in 32-bit words
    tailSizeWords = 0
## Byte order used by the radio.
byteOrder = "little"
## Whether the I/Q data in the payload are swapped
iqSwapped = False
@classmethod
def getFrameInfoDict(cls, self):
return {
"headerWords": cls.headerSizeWords,
"payloadWords": cls.payloadSizeWords,
"tailWords": cls.tailSizeWords,
"frameSize": (cls.headerSizeWords+cls.payloadSizeWords+cls.tailSizeWords)*4,
"v49.1": cls.vita49_1,
"v49.0": cls.vita49_0,
"byteSwap": cls.byteOrder!=sys.byteorder,
"iqSwap": cls.iqSwapped,
}
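# For illustration only: a per-radio interface spec is a small subclass that
# overrides the sizes above; a radio handler then points its static "ifSpec"
# member at that class. The name and word counts here are hypothetical, not
# those of an actual supported radio.
#
#   class exampleIfSpec(_ifSpec):
#       headerSizeWords = 7
#       payloadSizeWords = 1024
#       tailSizeWords = 1
#       byteOrder = "big"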
#-- Radio Handler Objects ---------------------------------------------#
##
# \brief Base radio handler class.
#
# This class implements the CyberRadioDriver.IRadio interface.
#
# To add a supported radio to this driver, derive a class from
# _radio and change the static members of the new class to describe the
# capabilities of that particular radio. Each supported radio should
# have its own module under the CyberRadioDriver.radios package tree.
#
# A radio handler object maintains a series of component objects, one
# per component of each type (tuner, WBDDC, NBDDC, etc.). Each component
# object is responsible for managing the hardware object that it represents.
# Each component object is also responsible for querying the component's
# current configuration and for maintaining the object's configuration
# as it changes during radio operation.
#
# A radio handler object also maintains its own configuration, for settings
# that occur at the radio level and are not managed by a component object.
#
# \note Several static members of this class have no function within the
# code, but instead help CyberRadioDriver.getRadioObjectDocstring() generate
# appropriate documentation for derived radio handler classes.
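#
# As a sketch (hypothetical model name and channel counts, not an actual
# supported radio), a derived handler typically just overrides static
# members such as:
#
#     class ndrExample(_radio):
#         _name = "NDR-Example"
#         ifSpec = _ifSpec
#         numTuner = 4
#         numWbddc = 4
#         numNbddc = 16
#         connectionModes = ["tcp"]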
#
# \implements CyberRadioDriver::IRadio
class _radio(log._logger, configKeys.Configurable):
_name = "NDRgeneric"
## \brief Radio uses JSON command/response interface?
json = False
## \brief VITA 49 interface specification class name (see _ifSpec class).
ifSpec = _ifSpec
## \brief Dictionary of VITA 49 interface specification classes, keyed by
# payload type strings, for those radios that support more than one VITA
# packet format.
ifSpecMap = {}
## \brief Analog-to-digital Converter clock rate
adcRate = 1.0
# Tuner settings
## \brief Number of tuners
numTuner = 0
## \brief Number of tuner boards
numTunerBoards = 1
## \brief Tuner index base (what number indices start at)
tunerIndexBase = 0
## \brief Tuner component type
tunerType = components._tuner
    ## \brief Tuner index overrides. Used for radios with tuner indices
    # that are a subset of the full index list.
# WBDDC settings
## \brief Number of WBDDCs available
numWbddc = numTuner
## \brief WBDDC index base (what number indices start at)
wbddcIndexBase = 1
## \brief WBDDC component type
wbddcType = components._wbddc
# NBDDC settings
## \brief Number of NBDDCs
numNbddc = 0
## \brief NBDDC index base (what number indices start at)
nbddcIndexBase = 1
## \brief NBDDC component type
nbddcType = components._nbddc
## \brief NBDDC index list override. This is a list of discrete indices
# for radios where the indices are a subset of the full index list.
# This should be set to None otherwise.
nbddcIndexOverride = None
# FFT Processor Settings
## \brief Number of FFT Channels
numFftStream = 0
## \brief FFT stream index base (what number indices start at)
fftStreamIndexBase = 0
## \brief FFT stream component type
fftStreamType = None
# Transmitter settings
## \brief Number of transmitters
numTxs = 0
## \brief Transmitter index base (what number indices start at)
txIndexBase = 1
## \brief Transmitter component type
txType = None
# WBDUC Settings
## \brief Number of WBDUC
numWbduc = 0
## \brief WBDUC index base (what number indices start at)
wbducIndexBase = 1
## \brief WBDUC component type
wbducType = None
# NBDUC Settings
## \brief Number of NBDUC
numNbduc = 0
## \brief NBDUC index base (what number indices start at)
nbducIndexBase = 1
## \brief NBDUC component type
nbducType = None
# WBDDC Group settings
## \brief Number of WBDDC groups available
numWbddcGroups = 0
## \brief WBDDC group index base (what number indices start at)
wbddcGroupIndexBase = 1
## \brief WBDDC Group component type
wbddcGroupType = None
# NBDDC Group settings
## \brief Number of NBDDC groups available
numNbddcGroups = 0
## \brief NBDDC group index base (what number indices start at)
nbddcGroupIndexBase = 1
## \brief NBDDC Group component type
nbddcGroupType = None
# Combined DDC Group settings
## \brief Number of combined DDC groups available
numCddcGroups = 0
## \brief Combined DDC group index base (what number indices start at)
cddcGroupIndexBase = 1
## \brief Combined DDC Group component type
cddcGroupType = None
# WBDUC Group settings
## \brief Number of WBDUC groups available
numWbducGroups = 0
## \brief WBDUC group index base (what number indices start at)
wbducGroupIndexBase = 1
## \brief WBDUC Group component type
wbducGroupType = None
# Tuner Group settings
## \brief Number of tuner groups available
numTunerGroups = 0
## \brief Tuner group index base (what number indices start at)
tunerGroupIndexBase = 1
## \brief Tuner Group component type
tunerGroupType = None
# UDP destination information
## \brief What the UDP destination setting represents for this radio
udpDestInfo = ""
## \brief Number of Gigabit Ethernet ports
numGigE = 0
## \brief Gigabit Ethernet port index base (what number indices start at)
gigEIndexBase = 1
## \brief Number of destination IP table entries for each Gigabit Ethernet port
numGigEDipEntries = 0
## \brief Gigabit Ethernet destination IP table index base (what number indices start at)
gigEDipEntryIndexBase = 0
# Supported command set. Each member listed here is either a
# command class (one derived from command._commandBase) or None
# if the command is not supported for a given radio.
## \brief Command: Identity query
idnQry = command.idn
## \brief Command: Version query
verQry = command.ver
## Command: Hardware revision query
hrevQry = command.hrev
## \brief Command: Status query
statQry = command.stat
## \brief Command: Tuner status query
tstatQry = command.tstat
## \brief Command: Time adjustment set/query
tadjCmd = None
## \brief Command: Reset
resetCmd = command.reset
## \brief Command: Configuration mode set/query
cfgCmd = command.cfg
## \brief Command: Pulse-per-second (PPS) set/query
ppsCmd = None
## \brief Command: UTC time set/query
utcCmd = None
## \brief Command: Reference mode set/query
refCmd = command.ref
## \brief Command: Reference bypass mode set/query
rbypCmd = None
## \brief Command: Source IP address set/query
sipCmd = command.sip
## \brief Command: Destination IP address set/query
dipCmd = command.dip
## \brief Command: Source MAC address set/query
#
# \note Most radios support \e querying the source MAC address, but few
# support \e setting it.
smacCmd = command.smac
## \brief Command: Destination MAC address set/query
dmacCmd = command.dmac
## \brief Command: Calibration frequency set/query
calfCmd = None
## \brief Command: Narrowband source selection set/query
nbssCmd = None
## \brief Command: Frequency normalization mode set/query
fnrCmd = None
## \brief Command: GPS receiver enable set/query
gpsCmd = None
## \brief Command: GPS position query
gposCmd = None
## \brief Command: Reference tuning voltage set/query
rtvCmd = None
## \brief Command: Radio temperature query
tempCmd = None
## \brief Command: GPIO output (static) set/query
gpioStaticCmd = None
## \brief Command: GPIO output (sequence) set/query
gpioSeqCmd = None
## \brief Command: Gigabit Ethernet interface flow control set/query
tgfcCmd = None
## \brief Coherent tuning command
cohTuneCmd = None
## \brief FPGA state selection command
fpgaStateCmd = None
## \brief Radio function (mode) selection command
funCmd = None
## \brief Radio Cntrl command
cntrlCmd = None
# Mode settings
## \brief Supported reference modes
refModes = {}
## \brief Supported reference bypass modes
rbypModes = {}
## \brief Supported VITA 49 enabling options
vitaEnableOptions = {}
## \brief Supported connection modes
connectionModes = ["tty"]
## \brief Default baud rate (has no effect if radio does not use TTY)
defaultBaudrate = 921600
## \brief Default port number (has no effect if radio does not use network connections)
defaultPort = 8617
## \brief Default timeout for communications over the radio transport
defaultTimeout = transport.radio_transport.defaultTimeout
## \brief Does this radio support setting the tuner bandwidth?
tunerBandwithSettable = False
## \brief Tuner bandwidth (Hz) for radios that do not support setting it
tunerBandwidthConstant = 40e6
##
# \brief The list of valid configuration keywords supported by this
# object. Override in derived classes as needed.
validConfigurationKeywords = [configKeys.CONFIG_MODE,
configKeys.REFERENCE_MODE,
configKeys.BYPASS_MODE,
configKeys.CALIB_FREQUENCY,
configKeys.FNR_MODE,
configKeys.GPS_ENABLE,
configKeys.REF_TUNING_VOLT,
configKeys.GIGE_FLOW_CONTROL,
]
## \brief Default "set time" value
setTimeDefault = False
##
# \brief Constructs a radio handler object.
#
# \copydetails CyberRadioDriver::IRadio::\_\_init\_\_()
def __init__(self, *args, **kwargs):
self._setConfigLock = threading.RLock()
# Set up configuration capability
configKeys.Configurable.__init__(self)
# Consume keyword arguments "verbose" and "logFile" for logging support
log._logger.__init__(self, *args, **kwargs)
# Now consume our own
self.setTime = kwargs.get("setTime",self.setTimeDefault)
self.logCtrl = kwargs.get("logCtrl",None)
self.transportTimeout = kwargs.get("timeout", None)
self.clientId = kwargs.get("clientId", None)
if self.transportTimeout is None:
self.transportTimeout = self.defaultTimeout
self.name = "%s%s"%(self._name,"-%s"%kwargs.get("name") if "name" in kwargs else "",)
self.logIfVerbose("Verbose mode!")
# Communication transport in use
self.transport = None
self.tunerDict = {}
self.wbddcDict = {}
self.nbddcDict = {}
self.fftStreamDict = {}
self.txDict = {}
self.wbducDict = {}
self.nbducDict = {}
self.wbddcGroupDict = {}
self.nbddcGroupDict = {}
self.cddcGroupDict = {}
self.wbducGroupDict = {}
self.tunerGroupDict = {}
self.componentList = []
# Little hack to ensure numWbddc is always set (we didn't always have this attribute).
if self.numWbddc is None:
self.numWbddc = self.numTuner
# Form the actual index lists for the different components. Now that certain components
# have discrete index values rather than a full sequence, we need to store these for
# later use.
self.tunerIndexList = list(range(self.tunerIndexBase, self.tunerIndexBase + self.numTuner))
self.wbddcIndexList = list(range(self.wbddcIndexBase, self.wbddcIndexBase + self.numWbddc))
self.nbddcIndexList = self.nbddcIndexOverride if self.nbddcIndexOverride is not None else \
list(range(self.nbddcIndexBase, self.nbddcIndexBase + self.numNbddc))
self.fftStreamIndexList = list(range(self.fftStreamIndexBase, self.fftStreamIndexBase + self.numFftStream))
self.txIndexList = list(range(self.txIndexBase, self.txIndexBase + self.numTxs))
self.wbducIndexList = list(range(self.wbducIndexBase, self.wbddcIndexBase + self.numWbduc))
self.nbducIndexList = list(range(self.nbducIndexBase, self.nbddcIndexBase + self.numNbduc))
self.wbddcGroupIndexList = list(range(self.wbddcGroupIndexBase, self.wbddcGroupIndexBase + self.numWbddcGroups))
self.nbddcGroupIndexList = list(range(self.nbddcGroupIndexBase, self.nbddcGroupIndexBase + self.numNbddcGroups))
self.cddcGroupIndexList = list(range(self.cddcGroupIndexBase, self.cddcGroupIndexBase + self.numCddcGroups))
self.wbducGroupIndexList = list(range(self.wbducGroupIndexBase, self.wbducGroupIndexBase + self.numWbducGroups))
self.tunerGroupIndexList = list(range(self.tunerGroupIndexBase, self.tunerGroupIndexBase + self.numTunerGroups))
self.gigEIndexList = list(range(self.gigEIndexBase, self.gigEIndexBase + self.numGigE))
self.gigEDipEntryIndexList = list(range(self.gigEDipEntryIndexBase, self.gigEDipEntryIndexBase + self.numGigEDipEntries))
self.txToneGenIndexList = [] if self.numTxs == 0 else \
list(range(self.txType.toneGenIndexBase, self.txType.toneGenIndexBase + self.txType.numToneGen))
# Make component objects
for objRange,objType,objDict in ( \
(self.tunerIndexList,self.tunerType,self.tunerDict), \
(self.wbddcIndexList,self.wbddcType,self.wbddcDict), \
(self.nbddcIndexList,self.nbddcType,self.nbddcDict), \
(self.fftStreamIndexList,self.fftStreamType,self.fftStreamDict), \
(self.txIndexList,self.txType,self.txDict), \
(self.wbducIndexList,self.wbducType,self.wbducDict), \
(self.nbducIndexList,self.nbducType,self.nbducDict), \
(self.wbddcGroupIndexList,self.wbddcGroupType,self.wbddcGroupDict), \
(self.nbddcGroupIndexList,self.nbddcGroupType,self.nbddcGroupDict), \
(self.cddcGroupIndexList,self.cddcGroupType,self.cddcGroupDict), \
(self.wbducGroupIndexList,self.wbducGroupType,self.wbducGroupDict), \
(self.tunerGroupIndexList,self.tunerGroupType,self.tunerGroupDict), \
):
if objType is not None:
for objInd in objRange:
objDict[objInd] = objType(parent=self, transport=None,
index=objInd, verbose=self.verbose,
logFile=self.logFile)
self.componentList.append( objDict[objInd] )
self.sipTable = {}
self.dipTable = {}
self.versionInfo = {}
# State variables
# -- is the radio connected through crdd?
self.isCrddConnection = False
# -- crdd command prefix, which tells crdd that this isn't a pass-through
# radio command. Set this to four vertical bars, because no NDR-class
# radio uses them.
self.crddCommandPrefix = "||||"
# Set the time on the radio
self.setTime = False
self.connectError = ""
##
# \brief Destroys a radio handler object.
#
# \copydetails CyberRadioDriver::IRadio::\_\_del\_\_()
def __del__(self):
if self.isConnected():
self.disconnect()
##
# \brief Indicates whether the radio is connected.
#
# \copydetails CyberRadioDriver::IRadio::isConnected()
def isConnected(self,):
return (self.transport is not None and self.transport.connected)
##
# \brief Returns version information for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVersionInfo()
def getVersionInfo(self):
# If this is a crdd connection, try to get the version info from
# crdd's radio handler rather than through direct radio commands
if self.isCrddConnection:
# Get the radio's version information from crdd
rsp = self._crddSendCommand(cmd="GETVERINFO")
if rsp is not None:
# Set the version info by running the first response string (the
# version info dict) through ast.literal_eval().
self.versionInfo = ast.literal_eval(rsp[0])
# Query hardware for details if we don't have them already
if not all([key in self.versionInfo for key in \
[configKeys.VERINFO_MODEL, configKeys.VERINFO_SN]]):
cmd = self.idnQry(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
self.versionInfo.update(rspInfo)
if not all([key in self.versionInfo for key in [configKeys.VERINFO_SW,
configKeys.VERINFO_FW,
configKeys.VERINFO_REF]]):
cmd = self.verQry(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
self.versionInfo.update(rspInfo)
if not all([key in self.versionInfo for key in [configKeys.VERINFO_MODEL,
configKeys.VERINFO_SN,
configKeys.VERINFO_UNITREV,
configKeys.VERINFO_HW]]):
cmd = self.hrevQry(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
# Don't mask previously determined model and S/N information!
for key in [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN]:
if key in self.versionInfo and key in rspInfo:
del rspInfo[key]
self.versionInfo.update(rspInfo)
for key in [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN,
configKeys.VERINFO_SW, configKeys.VERINFO_FW,
configKeys.VERINFO_REF, configKeys.VERINFO_UNITREV,
configKeys.VERINFO_HW]:
if key not in self.versionInfo:
self.versionInfo[key] = "N/A"
return self.versionInfo
##
# \brief Returns connection information for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getConnectionInfo()
def getConnectionInfo(self):
connectionInfo = {}
# Connection information
connectionInfo["mode"] = self.mode
if self.mode in ("tcp","udp","https"):
connectionInfo["hostname"] = self.host_or_dev
connectionInfo["port"] = "%d" % self.port_or_baudrate
elif self.mode == "tty":
connectionInfo["device"] = self.host_or_dev
connectionInfo["baudrate"] = "%d" % self.port_or_baudrate
return connectionInfo
##
# \brief Connects to a given radio.
#
# \copydetails CyberRadioDriver::IRadio::connect()
def connect(self,mode,host_or_dev,port_or_baudrate=None,setTime=False,initDdc=False,
reset=False, fcState=None):
if mode in self.connectionModes:
self.mode = mode
self.host_or_dev = host_or_dev
self.port_or_baudrate = port_or_baudrate
if self.port_or_baudrate is None:
self.port_or_baudrate = self.defaultBaudrate if mode == "tty" else \
self.defaultPort
self.logIfVerbose("USING PORT %r"%(self.port_or_baudrate))
if self.transport is not None:
self.transport.disconnect()
self.transport = None
self.transport = transport.radio_transport(parent=self,verbose=self.verbose,
logFile=self.logFile,
logCtrl=self.logCtrl,
json=self.json,
timeout=self.transportTimeout)
if self.transport.connect(mode, self.host_or_dev, self.port_or_baudrate):
if self.isCrddConnection:
self._crddInitialize()
# Query the configuration if we didn't already have it
if self.configuration == {}:
self._queryConfiguration()
for obj in self.componentList:
obj.addTransport(self.transport, self.sendCommand,
not self.isCrddConnection)
self.getVersionInfo()
if reset:
self.sendReset()
if setTime:
self.setTimeNextPps()
if initDdc:
self.setDdcConfiguration(wideband=True,)
self.setDdcConfiguration(wideband=False,)
if fcState is not None:
try:
self.setTenGigFlowControlStatus(fcState)
except:
pass
return True
else:
self.connectError = self.transport.connectError
self.disconnect()
return False
else:
self.log("Unsupported connection mode: %s", str(mode))
return False
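    # Usage sketch (illustrative; "radio" stands for a handler instance of this
    # class, and the address is a made-up example):
    #
    #     if radio.connect("udp", "192.168.0.10", None):   # None -> default port
    #         print(radio.getVersionInfo())
    #         radio.disconnect()
    #     else:
    #         print("Connection failed:", radio.connectError)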
##
# \brief Disconnects from the radio.
#
# \copydetails CyberRadioDriver::IRadio::disconnect()
def disconnect(self):
try:
self.transport.disconnect()
except:
self.logIfVerbose(traceback.format_exc())
#traceback.print_exc()
for obj in self.componentList:
obj.delTransport()
self.configuration = {}
##
# \brief Sends a command to the radio.
#
# \copydetails CyberRadioDriver::IRadio::sendCommand()
def sendCommand(self,cmdString,timeout=None):
# Sanity-check: Don't bother trying if we don't have a
# transport object, or if it's disconnected
if self.transport is None or not self.transport.isConnected():
return None
        # Check if this is an outgoing crdd command. These commands don't
        # use JSON framing, so we want to avoid running them through
        # the JSON layer (which won't work).
isCrddCommand = cmdString.startswith(self.crddCommandPrefix)
try:
if not isCrddCommand and self.json:
if isinstance(cmdString, str):
jsonCmd = json.loads(cmdString)
elif isinstance(cmdString, dict):
jsonCmd = cmdString
jsonCmd["msg"] = command.jsonConfig.newMessageId()
x = self.transport.sendCommandAndReceive(json.dumps(jsonCmd),timeout)
else:
x = self.transport.sendCommandAndReceive(cmdString, timeout, useJson=False)
if not self.transport.connected:
self.transport.disconnect()
return None
else:
return x
except:
self.logIfVerbose(traceback.format_exc())
#traceback.print_exc()
self.transport.disconnect()
return None
##
# \brief Sets the radio configuration.
#
# \copydetails CyberRadioDriver::IRadio::setConfiguration()
def setConfiguration(self, configDict={}):
if self.isCrddConnection:
return self._crddSetConfiguration(configDict)
else:
with self._setConfigLock:
self.cmdErrorInfo = []
# Normalize the incoming configuration dictionary before trying to process it.
configDict2 = self._normalizeConfigDict(configDict)
success = configKeys.Configurable.setConfiguration(self, **configDict2)
# Tuner configuration
tunerConfDict = configDict2.get(configKeys.CONFIG_TUNER,{})
for index in self.tunerIndexList:
if index in tunerConfDict:
confDict = tunerConfDict[index]
confDict[configKeys.TUNER_INDEX] = index
success &= self.setTunerConfigurationNew(**confDict)
# DDC configuration
for ddcType in [configKeys.CONFIG_WBDDC, configKeys.CONFIG_NBDDC]:
isWideband = (ddcType == configKeys.CONFIG_WBDDC)
ddcConfDict = configDict2.get(configKeys.CONFIG_DDC,{}).get(ddcType,{})
ddcIndexRange = self.wbddcIndexList if isWideband else self.nbddcIndexList
for index in ddcIndexRange:
if index in ddcConfDict:
confDict = ddcConfDict[index]
confDict[configKeys.DDC_INDEX] = index
success &= self.setDdcConfigurationNew(wideband=isWideband, **confDict)
# IP configuration
success &= self.setIpConfigurationNew(configDict2.get(configKeys.CONFIG_IP, {}))
# Transmitter configuration
txConfDict = configDict2.get(configKeys.CONFIG_TX,{})
for index in self.getTransmitterIndexRange():
if index in txConfDict:
confDict = txConfDict[index]
confDict[configKeys.TX_INDEX] = index
success &= self.setTxConfigurationNew(**confDict)
for ducType in [configKeys.CONFIG_WBDUC, configKeys.CONFIG_NBDUC]:
isWideband = (ducType == configKeys.CONFIG_WBDUC)
ducConfDict = configDict2.get(configKeys.CONFIG_DUC,{}).get(ducType,{})
ducIndexRange = self.wbducIndexList if isWideband else self.nbducIndexList
for index in ducIndexRange:
if index in ducConfDict:
confDict = ducConfDict[index]
confDict[configKeys.DUC_INDEX] = index
success &= self.setDucConfigurationNew(wideband=isWideband, **confDict)
# DDC group configuration
for ddcType in [configKeys.CONFIG_WBDDC_GROUP, configKeys.CONFIG_NBDDC_GROUP]:
# Flag for forcing the driver to query DDCs for status information
forceDdcQuery = False
isWideband = (ddcType == configKeys.CONFIG_WBDDC_GROUP)
ddcGroupConfDict = configDict2.get(configKeys.CONFIG_DDC_GROUP,{}).get(ddcType,{})
ddcGroupIndexRange = self.wbddcGroupIndexList if isWideband else self.nbddcGroupIndexList
for index in ddcGroupIndexRange:
if index in ddcGroupConfDict:
confDict = ddcGroupConfDict[index]
confDict[configKeys.INDEX] = index
success &= self.setDdcGroupConfigurationNew(wideband=isWideband, **confDict)
# Force DDC query if DDC grouping configuration gets changed
forceDdcQuery = True
# This section forces hardware queries to update the corresponding DDC
# and DDC group configurations.
if forceDdcQuery:
ddcDict = self.wbddcDict if isWideband else self.nbddcDict
for i in self._getIndexList(None, ddcDict):
ddcDict[i]._queryConfiguration()
ddcGroupDict = self.wbddcGroupDict if isWideband else self.nbddcGroupDict
for i in self._getIndexList(None, ddcGroupDict):
ddcGroupDict[i]._queryConfiguration()
# Combined DDC group configuration
for ddcType in [configKeys.CONFIG_COMBINED_DDC_GROUP]:
#self.logIfVerbose("[ndr551][setConfiguration()] Configure combined DDCs")
# Flag for forcing the driver to query DDCs for status information
forceDdcQuery = False
ddcGroupConfDict = configDict2.get(configKeys.CONFIG_DDC_GROUP,{}).get(ddcType,{})
ddcGroupIndexRange = self.cddcGroupIndexList
for index in ddcGroupIndexRange:
if index in ddcGroupConfDict:
confDict = ddcGroupConfDict[index]
confDict[configKeys.INDEX] = index
#self.logIfVerbose("[ndr551][setConfiguration()] Combined DDC", index)
#self.logIfVerbose("[ndr551][setConfiguration()] %s" % confDict)
success &= self.setCombinedDdcGroupConfigurationNew(**confDict)
# Force DDC query if DDC grouping configuration gets changed
forceDdcQuery = True
# This section forces hardware queries to update the corresponding DDC
# and DDC group configurations.
if forceDdcQuery:
for isWideband in [True, False]:
ddcDict = self.wbddcDict if isWideband else self.nbddcDict
for i in self._getIndexList(None, ddcDict):
ddcDict[i]._queryConfiguration()
ddcGroupDict = self.cddcGroupDict
for i in self._getIndexList(None, ddcGroupDict):
ddcGroupDict[i]._queryConfiguration()
# DUC configuration
for ducType in [configKeys.CONFIG_WBDUC_GROUP]:
# Flag for forcing the driver to query DUCs for status information
forceDucQuery = False
isWideband = (ducType == configKeys.CONFIG_WBDUC_GROUP)
ducGroupConfDict = configDict2.get(configKeys.CONFIG_DUC_GROUP,{}).get(ducType,{})
ducGroupIndexRange = self.wbducGroupIndexList if isWideband else self.nbducGroupIndexList
for index in ducGroupIndexRange:
if index in ducGroupConfDict:
confDict = ducGroupConfDict[index]
confDict[configKeys.INDEX] = index
success &= self.setDucGroupConfigurationNew(wideband=isWideband, **confDict)
# Force DUC query if DUC grouping configuration gets changed
forceDucQuery = True
# This section forces hardware queries to update the corresponding DUC
# and DUC group configurations.
if forceDucQuery:
ducDict = self.wbducDict if isWideband else self.nbducDict
for i in self._getIndexList(None, ducDict):
ducDict[i]._queryConfiguration()
ducGroupDict = self.wbducGroupDict if isWideband else self.nbducGroupDict
for i in self._getIndexList(None, ducGroupDict):
ducGroupDict[i]._queryConfiguration()
# FFT streaming configuration
fftStreamConfDict = configDict2.get(configKeys.CONFIG_FFT,{})
for index in self.fftStreamIndexList:
if index in fftStreamConfDict:
confDict = fftStreamConfDict[index]
confDict[configKeys.FFT_INDEX] = index
success &= self.setFftStreamConfiguration(**confDict)
# Tuner group configuration
forceTunerQuery = False
tunerGroupConfDict = configDict2.get(configKeys.CONFIG_TUNER_GROUP,{})
tunerGroupIndexRange = self.tunerGroupIndexList
for index in tunerGroupIndexRange:
if index in tunerGroupConfDict:
confDict = tunerGroupConfDict[index]
confDict[configKeys.INDEX] = index
success &= self.setTunerGroupConfigurationNew(**confDict)
# Force tuner query if tuner grouping configuration gets changed
forceTunerQuery = True
if forceTunerQuery:
for i in self._getIndexList(None, self.tunerDict):
self.tunerDict[i]._queryConfiguration()
for i in self._getIndexList(None, self.tunerGroupDict):
self.tunerGroupDict[i]._queryConfiguration()
return success
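    # Configuration dictionary sketch (illustrative; the tuner index and timing
    # adjustment value are arbitrary examples, mirroring setTimeAdjustment()):
    #
    #     radio.setConfiguration({
    #         configKeys.CONFIG_TUNER: {
    #             1: { configKeys.TUNER_TIMING_ADJ: 0 },
    #         },
    #     })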
##
# \brief Sets the radio configuration based on a sequence of configuration
# dictionary keys.
#
# \copydetails CyberRadioDriver::IRadio::setConfigurationByKeys()
def setConfigurationByKeys(self, value=None, *keys):
configDict = {}
self._dictEnsureEntrySet(configDict, value, *keys)
return self.setConfiguration(configDict)
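    # Sketch: the call below is equivalent to handing setConfiguration() the
    # nested dictionary from the previous example (value first, then the key
    # path; index and value are arbitrary):
    #
    #     radio.setConfigurationByKeys(0, configKeys.CONFIG_TUNER, 1,
    #                                  configKeys.TUNER_TIMING_ADJ)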
##
# \brief Gets the radio configuration.
#
# \copydetails CyberRadioDriver::IRadio::getConfiguration()
def getConfiguration(self):
ret = None
if self.isCrddConnection:
ret = self._crddGetConfiguration()
else:
self.cmdErrorInfo = []
ret = configKeys.Configurable.getConfiguration(self)
# Get tuner configuration
if self.numTuner > 0:
ret[configKeys.CONFIG_TUNER] = self.getTunerConfigurationNew()
# Get DDC configuration
if self.numWbddc > 0:
ret[configKeys.CONFIG_DDC] = {}
# -- Wideband
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_WBDDC] = self.getDdcConfigurationNew(wideband=True)
if self.numNbddc > 0:
# -- Narrowband
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDDC] = self.getDdcConfigurationNew(wideband=False)
if self.numFftStream > 0:
ret[configKeys.CONFIG_FFT] = self.getFftStreamConfiguration()
# Get transmitter configuration
if self.numTxs > 0:
ret[configKeys.CONFIG_TX] = self.getTxConfigurationNew()
# Get DUC configuration
if self.numTxs > 0:
ret[configKeys.CONFIG_DUC] = {}
# -- Wideband
ret[configKeys.CONFIG_DUC][configKeys.CONFIG_WBDUC] = self.getDucConfigurationNew(wideband=True)
if self.numNbduc > 0:
# -- Narrowband
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDUC] = self.getDucConfigurationNew(wideband=False)
# Get DDC group configuration
if self.numWbddcGroups > 0:
ret[configKeys.CONFIG_DDC_GROUP] = {}
# -- Wideband
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_WBDDC_GROUP] = \
self.getDdcGroupConfigurationNew(wideband=True)
if self.numNbddcGroups > 0:
# -- Narrowband
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_NBDDC_GROUP] = \
self.getDdcGroupConfigurationNew(wideband=False)
elif self.numCddcGroups > 0:
ret[configKeys.CONFIG_DDC_GROUP] = {}
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_COMBINED_DDC_GROUP] = \
self.getCombinedDdcGroupConfigurationNew()
# Get DUC group configuration
if self.numWbducGroups > 0:
ret[configKeys.CONFIG_DUC_GROUP] = {}
# -- Wideband
ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_WBDUC_GROUP] = \
self.getDucGroupConfigurationNew(wideband=True)
# if self.numNbducGroups > 0:
# # -- Narrowband
# ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_NBDUC_GROUP] = \
# self.getDucGroupConfigurationNew(wideband=False)
# Get tuner group configuration
if self.numTunerGroups > 0:
ret[configKeys.CONFIG_TUNER_GROUP] = \
self.getTunerGroupConfigurationNew()
return ret
##
# \brief Gets radio configuration information based on a sequence of configuration
# dictionary keys.
#
# \copydetails CyberRadioDriver::IRadio::getConfigurationByKeys()
def getConfigurationByKeys(self, *keys):
return self._dictSafeGet(self.getConfiguration(), None, *keys)
##
# \brief Queries the radio hardware to get its configuration.
#
# \copydetails CyberRadioDriver::IRadio::queryConfiguration()
def queryConfiguration(self):
return self.queryConfigurationByKeys()
##
# \brief Queries the radio hardware to get a subset of configuration information,
# based on a sequence of configuration dictionary keys.
#
# \copydetails CyberRadioDriver::IRadio::queryConfigurationByKeys()
def queryConfigurationByKeys(self, *keys):
self.cmdErrorInfo = []
ret = {}
if self.isCrddConnection:
ret = self._crddQueryConfigurationByKeys(*keys)
else:
if len(keys) == 0:
ret = configKeys.Configurable.queryConfiguration(self)
# Get tuner configuration
if self.numTuner > 0:
if len(keys) == 0:
ret[configKeys.CONFIG_TUNER] = self.queryTunerConfigurationNew(tunerIndex=None)
elif len(keys) > 0 and keys[0] == configKeys.CONFIG_TUNER:
tunerIndex = None if len(keys) == 1 else int(keys[1])
ret[configKeys.CONFIG_TUNER] = self.queryTunerConfigurationNew(tunerIndex=tunerIndex)
# Get DDC configuration
if self.numWbddc > 0:
if len(keys) == 0 or keys[0] == configKeys.CONFIG_DDC:
ret[configKeys.CONFIG_DDC] = {}
# -- Wideband
if len(keys) < 2:
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_WBDDC] = self.queryDdcConfigurationNew(
wideband=True, ddcIndex=None)
elif keys[1] == configKeys.CONFIG_WBDDC:
ddcIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_WBDDC] = self.queryDdcConfigurationNew(
wideband=True, ddcIndex=ddcIndex)
# -- Narrowband
if self.numNbddc > 0:
if len(keys) < 2:
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDDC] = self.queryDdcConfigurationNew(
wideband=False, ddcIndex=None)
elif keys[1] == configKeys.CONFIG_NBDDC:
ddcIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDDC] = self.queryDdcConfigurationNew(
wideband=False, ddcIndex=ddcIndex)
# Get FFT Stream configuration
if self.numFftStream > 0:
if len(keys) == 0:
ret[configKeys.CONFIG_FFT] = self.queryFftStreamConfiguration(fftStreamIndex=None)
elif len(keys) > 0 and keys[0] == configKeys.CONFIG_FFT:
fftStreamIndex = None if len(keys) == 1 else int(keys[1])
ret[configKeys.CONFIG_FFT] = self.queryFftStreamConfiguration(fftStreamIndex=fftStreamIndex)
# Get transmitter configuration
if self.numTxs > 0:
if len(keys) == 0:
ret[configKeys.CONFIG_TX] = self.queryTxConfigurationNew(txIndex=None)
elif len(keys) > 0 and keys[0] == configKeys.CONFIG_TX:
txIndex = None if len(keys) == 1 else int(keys[1])
ret[configKeys.CONFIG_TX] = self.queryTxConfigurationNew(txIndex=txIndex)
# Get DUC configuration
if self.numTxs > 0:
if len(keys) == 0 or keys[0] == configKeys.CONFIG_DUC:
ret[configKeys.CONFIG_DUC] = {}
# -- Wideband
if len(keys) < 2:
ret[configKeys.CONFIG_DUC][configKeys.CONFIG_WBDUC] = self.queryDucConfigurationNew(
wideband=True, ducIndex=None)
elif keys[1] == configKeys.CONFIG_WBDUC:
ducIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DUC][configKeys.CONFIG_WBDUC] = self.queryDucConfigurationNew(
wideband=True, ducIndex=ducIndex)
# -- Narrowband
if self.numNbduc > 0:
if len(keys) < 2:
ret[configKeys.CONFIG_DUC][configKeys.CONFIG_NBDUC] = self.queryDucConfigurationNew(
wideband=False, ducIndex=None)
elif keys[1] == configKeys.CONFIG_NBDUC:
ducIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DUC][configKeys.CONFIG_NBDUC] = self.queryDucConfigurationNew(
wideband=False, ducIndex=ducIndex)
# Get DDC group configuration
if any( [self.numWbddcGroups > 0, self.numNbddcGroups > 0, self.numCddcGroups > 0] ):
if len(keys) == 0 or keys[0] == configKeys.CONFIG_DDC_GROUP:
ret[configKeys.CONFIG_DDC_GROUP] = {}
# -- Wideband
if self.numWbddcGroups > 0:
if len(keys) < 2:
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_WBDDC_GROUP] = \
self.queryDdcGroupConfigurationNew(wideband=True, ddcGroupIndex=None)
elif keys[1] == configKeys.CONFIG_WBDDC_GROUP:
ddcGroupIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_WBDDC_GROUP] = \
self.queryDdcGroupConfigurationNew(wideband=True, ddcGroupIndex=ddcGroupIndex)
# -- Narrowband
if self.numNbddcGroups > 0:
if len(keys) < 2:
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_NBDDC_GROUP] = \
self.queryDdcGroupConfigurationNew(wideband=False, ddcGroupIndex=None)
elif keys[1] == configKeys.CONFIG_NBDDC_GROUP:
ddcGroupIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_NBDDC_GROUP] = \
self.queryDdcGroupConfigurationNew(wideband=False, ddcGroupIndex=ddcGroupIndex)
# -- Combined (wideband + narrowband)
if self.numCddcGroups > 0:
if len(keys) < 2:
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_COMBINED_DDC_GROUP] = \
self.queryCombinedDdcGroupConfigurationNew(ddcGroupIndex=None)
elif keys[1] == configKeys.CONFIG_COMBINED_DDC_GROUP:
ddcGroupIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_COMBINED_DDC_GROUP] = \
self.queryCombinedDdcGroupConfigurationNew(ddcGroupIndex=ddcGroupIndex)
# Get DUC group configuration
if any( [self.numWbducGroups > 0] ):
if len(keys) == 0 or keys[0] == configKeys.CONFIG_DUC_GROUP:
ret[configKeys.CONFIG_DUC_GROUP] = {}
# -- Wideband
if self.numWbducGroups > 0:
if len(keys) < 2:
ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_WBDUC_GROUP] = \
self.queryDucGroupConfigurationNew(wideband=True, ducGroupIndex=None)
elif keys[1] == configKeys.CONFIG_WBDUC_GROUP:
ducGroupIndex = None if len(keys) == 2 else int(keys[2])
ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_WBDUC_GROUP] = \
self.queryDucGroupConfigurationNew(wideband=True, ducGroupIndex=ducGroupIndex)
# Get tuner group configuration
if self.numTunerGroups > 0:
if len(keys) == 0:
ret[configKeys.CONFIG_TUNER_GROUP] = self.queryTunerGroupConfigurationNew(
tunerGroupIndex=None)
elif len(keys) > 0 and keys[0] == configKeys.CONFIG_TUNER_GROUP:
tunerGroupIndex = None if len(keys) == 1 else int(keys[1])
ret[configKeys.CONFIG_TUNER_GROUP] = self.queryTunerGroupConfigurationNew(
tunerGroupIndex=tunerGroupIndex)
# Query IP configuration
if len(keys) == 0 or keys[0] == configKeys.CONFIG_IP:
if len(keys) == 0:
ret[configKeys.CONFIG_IP] = self.queryIpConfigurationNew(gigEPortIndex=None)
elif len(keys) > 0 and keys[0] == configKeys.CONFIG_IP:
gigEPortIndex = None if len(keys) == 1 else int(keys[1])
ret[configKeys.CONFIG_IP] = self.queryIpConfigurationNew(gigEPortIndex=gigEPortIndex)
# Update the internal configuration dictionary based on query results
self.configuration.update(ret)
# Return the result
return ret
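    # Sketch: keys narrow the hardware query, for example to a single tuner
    # (the index component is int()-converted, so 1 and "1" both work):
    #
    #     tunerCfg = radio.queryConfigurationByKeys(configKeys.CONFIG_TUNER, 1)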
##
# \protected
# \brief Queries hardware to determine the object's current configuration.
def _queryConfiguration(self):
# Call the base-class implementation
configKeys.Configurable._queryConfiguration(self)
# Override
for cmdClass, configKey in [ \
(self.cfgCmd, configKeys.CONFIG_MODE), \
(self.refCmd, configKeys.REFERENCE_MODE), \
(self.rbypCmd, configKeys.BYPASS_MODE), \
(self.calfCmd, configKeys.CALIB_FREQUENCY), \
(self.fnrCmd, configKeys.FNR_MODE), \
(self.gpsCmd, configKeys.GPS_ENABLE), \
(self.rtvCmd, configKeys.REF_TUNING_VOLT), \
(self.fpgaStateCmd, configKeys.FPGA_STATE), \
(self.funCmd, configKeys.RADIO_FUNCTION), \
(self.refCmd, configKeys.STATUS_PPS_SOURCE), \
# (self.cntrlCmd, configKeys.CNTRL_IF_OUT), \
]:
if cmdClass is not None:
cmd = cmdClass(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
#self.logIfVerbose("DEBUG:", cmd.mnemonic, "rspInfo=", rspInfo)
if rspInfo is not None:
self.configuration[configKey] = rspInfo.get(configKey, 0)
# IP configuration query -- The format of this section depends on whether
# the radio has Gigabit Ethernet ports on it or not.
if configKeys.CONFIG_IP not in self.configuration:
self.configuration[configKeys.CONFIG_IP] = {}
self.configuration[configKeys.CONFIG_IP].update( self.queryIpConfigurationNew() )
##
# \protected
# \brief Issues hardware commands to set the object's current configuration.
def _setConfiguration(self, confDict):
ret = True
for cmdClass, configKey in [ \
(self.cfgCmd, configKeys.CONFIG_MODE), \
(self.refCmd, configKeys.REFERENCE_MODE), \
(self.rbypCmd, configKeys.BYPASS_MODE), \
(self.calfCmd, configKeys.CALIB_FREQUENCY), \
(self.fnrCmd, configKeys.FNR_MODE), \
(self.gpsCmd, configKeys.GPS_ENABLE), \
(self.rtvCmd, configKeys.REF_TUNING_VOLT), \
(self.fpgaStateCmd, configKeys.FPGA_STATE), \
(self.refCmd, configKeys.STATUS_PPS_SOURCE), \
(self.cntrlCmd, configKeys.CNTRL_IF_OUT), \
]:
cDict = { "parent": self, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKey: confDict.get(configKey, 0)
}
if configKey in confDict and cmdClass is not None and \
cmdClass.settable:
cmd = cmdClass(**cDict)
ret &= cmd.send( self.sendCommand, )
ret &= cmd.success
self._addLastCommandErrorInfo(cmd)
if ret:
self.configuration[configKey] = getattr(cmd, configKey)
pass
return ret
##
# \protected
# \brief Gets whether or not the given (nested) dictionary has an entry for the given keys.
#
# \param dicty The dictionary to search.
# \param keys A number of comma-separated search keys, each pointing to a deeper level
# of the dictionary hierarchy.
# \return True if the dictionary has the entry, False otherwise.
def _dictHasEntry(self, dicty, *keys):
ret = True
keysOk = [ q != "" for q in keys ]
if all(keysOk):
tmp = dicty
for key in keys:
if key not in tmp:
ret = False
break
else:
tmp = tmp[key]
else:
ret = False
return ret
##
# \protected
# \brief Ensures that we make an entry in the given dictionary with the specified keys, using
# the provided default for the entry.
#
    # \param dicty The dictionary to manipulate.
    # \param default The default value to use for the entry if it does not already exist.
    # \param keys A number of comma-separated search keys, each pointing to a deeper level
    # of the dictionary hierarchy.
def _dictEnsureEntry(self, dicty, default, *keys):
tmp = dicty
# Create intermediate sub-dicts if needed
for i, key in enumerate(keys):
if i < len(keys)-1:
if key not in tmp:
#print "[DBG] sub-dict key", key, "not present"
tmp[key] = {}
else:
#print "[DBG] sub-dict key", key, "present"
pass
tmp = tmp[key]
else:
if key not in tmp:
#print "[DBG] value key", key, "not present"
tmp[key] = default
else:
#print "[DBG] value key", key, "present"
pass
pass
##
# \protected
# \brief Ensures that a given nested dictionary item is set to the provided value,
# even if the item does not already exist.
# \param dicty The dictionary to manipulate.
# \param value The value to set the entry to.
# \param keys A number of comma-separated search keys, each pointing to a deeper level
# of the dictionary hierarchy.
def _dictEnsureEntrySet(self, dicty, value, *keys):
self._dictEnsureEntry(dicty, value, *keys)
tmp = dicty
for i, key in enumerate(keys):
if i < len(keys)-1:
tmp = tmp[key]
else:
try:
tmp[key] = copy.deepcopy(value)
except:
tmp[key] = value
##
# \protected
# \brief Gets a value from a dictionary in a "safe" way, using a default in case there is
# no entry for the given set of keys.
#
# \param dicty The dictionary to query.
# \param default The default value to use if the keys do not point to a valid entry.
# \param keys A number of comma-separated search keys, each pointing to a deeper level
# of the dictionary hierarchy.
# \return The entry from the dictionary, or the default if the entry does not exist.
def _dictSafeGet(self, dicty, default, *keys):
ret = default if len(keys) > 0 else dicty
if self._dictHasEntry(dicty, *keys):
tmp = dicty
for i, key in enumerate(keys):
if i < len(keys)-1:
tmp = tmp[key]
else:
ret = tmp[key]
return ret
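    # Sketch of the nested-dictionary helpers above (keys and values are
    # arbitrary examples):
    #
    #     d = {}
    #     self._dictEnsureEntrySet(d, 100.0, "tuner", 1, "frequency")
    #     self._dictSafeGet(d, None, "tuner", 1, "frequency")   # -> 100.0
    #     self._dictSafeGet(d, None, "tuner", 2, "frequency")   # -> None (default)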
##
# \internal
# \brief Initializes the radio handler object after connecting to crdd.
#
def _crddInitialize(self):
# Optionally, send crdd our client ID
if self.clientId is not None:
rsp = self._crddSendCommand(cmd="CLIENTID", data=self.clientId)
# Get the radio's current configuration from crdd
self._crddGetConfiguration()
pass
##
# \internal
# \brief Sends a command to crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \param cmd Command mnemonic
# \param data Data to send as a command parameter. What actually gets sent
# over the link is this object's string representation. Can be None, in
# which case only the command gets sent.
# \returns Either a list of response strings (if the command completed
# successfully), or None (if it did not).
def _crddSendCommand(self, cmd, data=None):
outCmd = self.crddCommandPrefix + str(cmd)
if data is not None:
outCmd += " " + str(data)
outCmd += "\n"
return self.sendCommand(outCmd)
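    # Wire-format sketch: for cmd="GETCFG" and data=None the string sent to
    # crdd is "||||GETCFG\n" -- the "||||" prefix marks it as a crdd-local
    # command rather than a pass-through radio command.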
##
# \internal
# \brief Unpacks the provided configuration dictionary, setting the
# configuration of all components.
# \param configuration Fully-specified configuration dictionary.
def _crddUnpackConfiguration(self, configuration):
# Unpack the full configuration
fullConfiguration = copy.deepcopy(configuration)
# -- Tuner configuration
cDict = fullConfiguration.pop(configKeys.CONFIG_TUNER, {})
for index in list(cDict.keys()):
self.tunerDict[index].configuration = cDict[index]
# -- DDC configuration
cDict = fullConfiguration.pop(configKeys.CONFIG_DDC, {})
for ddcType in list(cDict.keys()):
ddcDict = self.wbddcDict
if ddcType == "narrowband":
ddcDict = self.nbddcDict
for index in list(cDict[ddcType].keys()):
ddcDict[index].configuration = cDict[ddcType][index]
# -- FFT streams
cDict = fullConfiguration.pop(configKeys.CONFIG_FFT, {})
for index in list(cDict.keys()):
self.fftStreamDict[index].configuration = cDict[index]
# -- TX configuration
cDict = fullConfiguration.pop(configKeys.CONFIG_TX, {})
for index in list(cDict.keys()):
cDict2 = cDict[index].pop(configKeys.CONFIG_CW, {})
for index2 in list(cDict2.keys()):
self.txDict[index].toneGenDict[index2].configuration = cDict2[index2]
self.txDict[index].configuration = cDict[index]
# -- DUC configuration
cDict = fullConfiguration.pop(configKeys.CONFIG_DUC, {})
for ducType in list(cDict.keys()):
ducDict = self.wbducDict
if ducType == "narrowband":
ducDict = self.nbducDict
for index in list(cDict[ducType].keys()):
ducDict[index].configuration = cDict[ducType][index]
# -- DDC group configuration
cDict = fullConfiguration.pop(configKeys.CONFIG_DDC_GROUP, {})
for ddcType in list(cDict.keys()):
ddcDict = self.wbddcGroupDict
if ddcType == "narrowband":
ddcDict = self.nbddcGroupDict
elif ddcType == "combined":
ddcDict = self.cddcGroupDict
for index in list(cDict[ddcType].keys()):
ddcDict[index].configuration = cDict[ddcType][index]
# -- WBDUC groups
cDict = fullConfiguration.pop(configKeys.CONFIG_DUC_GROUP, {})
for ducType in list(cDict.keys()):
ducDict = self.wbducGroupDict
#if ducType == "narrowband":
# ducDict = self.nbducGroupDict
for index in list(cDict[ducType].keys()):
ducDict[index].configuration = cDict[ducType][index]
# -- Tuner groups
cDict = fullConfiguration.pop(configKeys.CONFIG_TUNER_GROUP, {})
for index in list(cDict.keys()):
self.tunerGroupDict[index].configuration = cDict[index]
        # -- What is left after all the popping consists of the radio-specific
        # config items and the IP config
self.configuration = fullConfiguration
pass
##
# \internal
# \brief Gets the radio's current configuration from crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \returns Either the returned configuration dictionary (if the command
# completed successfully), or an empty dictionary (if it did not).
def _crddGetConfiguration(self):
ret = {}
# Get the radio's current configuration from crdd
rsp = self._crddSendCommand(cmd="GETCFG", data=None)
# Deal with out-of-bound conditions
try:
if all( [
rsp is not None,
rsp != "Empty Read",
rsp[0] != "TIMEOUT"
] ):
# Get the returned full configuration by running the first response
# string (the config dict) through ast.literal_eval().
ret = ast.literal_eval(rsp[0])
# Unpack the full configuration
self._crddUnpackConfiguration(ret)
except:
pass
return ret
##
# \internal
# \brief Sets the radio's current configuration using crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \return True if all commands completed successfully, False otherwise.
# Use getLastCommandErrorInfo() to retrieve any error information.
def _crddSetConfiguration(self, configDict={}):
ret = False
# Get the radio's current configuration from crdd
rsp = self._crddSendCommand(cmd="SETCFG", data=configDict)
# Deal with out-of-bound conditions
try:
if all( [
rsp is not None,
rsp != "Empty Read",
rsp[0] != "TIMEOUT"
] ):
#self.log("[DBG] rsp =", str(rsp))
# First response string: SUCCESS or ERROR (plus error info)
ret = ( rsp[0] == "SUCCESS" )
if not ret:
# Grab the error info (serialized as a list of strings)
self.cmdErrorInfo = ast.literal_eval(rsp[0].replace("ERROR: ", ""))
# Second response string: Updated configuration dictionary string.
# Run this through ast.literal_eval().
configuration = ast.literal_eval(rsp[1])
# Unpack the full configuration
self._crddUnpackConfiguration(configuration)
except:
pass
return ret
##
# \internal
# \brief Queries the radio's current configuration from crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \param keys List of keys used to specify which configuration values to query.
# \returns Either the returned configuration dictionary (if the command
# completed successfully), or an empty dictionary (if it did not).
def _crddQueryConfigurationByKeys(self, *keys):
ret = {}
# Query the radio's current configuration from crdd
rsp = self._crddSendCommand(cmd="QUERYCFGK", data=list(keys))
# Deal with out-of-bound conditions
try:
if all( [
rsp is not None,
rsp != "Empty Read",
rsp[0] != "TIMEOUT"
] ):
# Get the returned configuration by running the first response
# string (the config dict) through ast.literal_eval().
ret = ast.literal_eval(rsp[0])
except:
pass
return ret
##
# \internal
# \brief Gets the list of currently connected data port indices from crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \returns Either the returned data port list (if the command
# completed successfully), or an empty list (if it did not).
def _crddGetConnectedDataPortIndices(self):
ret = []
# Get the radio's current configuration from crdd
rsp = self._crddSendCommand(cmd="QUERYCDPS", data=None)
# Deal with out-of-bound conditions
try:
if all( [
rsp is not None,
rsp != "Empty Read",
rsp[0] != "TIMEOUT"
] ):
# Get the returned list by running the first response
# string (the data port list) through ast.literal_eval().
ret = ast.literal_eval(rsp[0])
except:
pass
return ret
##
# \internal
# \brief Helper method for converting Unicode strings to ASCII strings
# during the JSON conversion process.
#
# The JSON-formatted string will have elements whose names
# correspond to the names of this entity's attributes.
#
# \param data The entity being encoded as JSON.
@staticmethod
def encodeJsonAsAscii(data):
def _foo(item):
ret = item
if isinstance(item, str):
                # Round-trip through ASCII so the result remains a str on
                # Python 3 (str.encode() alone would produce bytes here).
                ret = item.encode('ascii').decode('ascii')
elif isinstance(item, list):
ret = [ _foo(q) for q in item ]
elif isinstance(item, dict):
ret = { _foo(key): _foo(value) for key, value in item.items() }
return ret
adjPairs = []
for pair in data:
adjPairs.append( (_foo(pair[0]), _foo(pair[1])) )
return dict(adjPairs)
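    # Sketch (assumed usage; not shown in this file): the pair-list signature
    # matches a json.loads() object_pairs_hook, e.g.
    #
    #     json.loads(text, object_pairs_hook=SomeRadioClass.encodeJsonAsAscii)
    #
    # where SomeRadioClass is any handler class derived from this one.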
##
# \brief Resets the radio.
#
# \copydetails CyberRadioDriver::IRadio::sendReset()
def sendReset(self, resetType=None):
if self.resetCmd is not None:
cDict = { "parent": self,
"verbose": self.verbose,
"logFile": self.logFile,
configKeys.RESET_TYPE: resetType,
}
cmd = self.resetCmd(**cDict)
cmd.send( self.sendCommand, )
return cmd.success
else:
return False
#time.sleep(20)
#self.connect(self.mode,self.host_or_dev,self.port_or_baudrate)
##
# \brief Gets the pulse-per-second (PPS) rising edge from the radio.
#
# \copydetails CyberRadioDriver::IRadio::getPps()
def getPps(self):
if self.ppsCmd is not None:
cmd = command.pps(parent=self,query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send(self.sendCommand, timeout=cmd.timeout)
return cmd.success
else:
return False
##
# \brief Sets the time for the next PPS rising edge on the radio.
#
# \copydetails CyberRadioDriver::IRadio::setTimeNextPps()
def setTimeNextPps(self,checkTime=False,useGpsTime=False,newPpsTime=None):
if self.ppsCmd is not None and self.utcCmd is not None:
if self.getPps():
if newPpsTime is not None:
nextSecond = int( _radio.timeFromString(newPpsTime, utc=True) )
cmd = self.utcCmd( parent=self, utcTime=str(nextSecond),
verbose=self.verbose, logFile=self.logFile )
elif useGpsTime:
cmd = self.utcCmd( parent=self, utcTime="g" )
else:
nextSecond = int( math.floor( time.time() ) )+1
cmd = self.utcCmd( parent=self, utcTime=str(nextSecond),
verbose=self.verbose, logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
                if checkTime and not useGpsTime:
                    # Note: the verification step is skipped for GPS time,
                    # since nextSecond is not known in advance in that case.
                    radioUtc = self.getTimeNextPps()
                    self.logIfVerbose("Set time = %d & Query time = %d" % (nextSecond,radioUtc))
                    return radioUtc==nextSecond
                else:
                    return cmd.success
else:
self.log("ERROR, ERROR, ERROR".center(80,"!"))
return False
else:
return False
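    # Usage sketch (illustrative):
    #
    #     radio.setTimeNextPps(checkTime=True)    # next whole second, then verify
    #     radio.setTimeNextPps(useGpsTime=True)   # latch GPS time ("g") instead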
##
# \brief Gets the current radio time.
#
# \copydetails CyberRadioDriver::IRadio::getTimeNow()
def getTimeNow(self):
if self.utcCmd is not None:
cmd = self.utcCmd( parent=self, query=True,
verbose=self.verbose, logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
return cmd.getResponseInfo().get(configKeys.TIME_UTC, None)
else:
return None
##
# \brief Gets the time for the next PPS rising edge on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTimeNextPps()
def getTimeNextPps(self):
if self.ppsCmd is not None and self.utcCmd is not None:
if self.getPps():
cmd = self.utcCmd( parent=self, query=True,
verbose=self.verbose, logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
return cmd.getResponseInfo().get(configKeys.TIME_UTC, None)
else:
return None
else:
return None
##
# \brief Gets the status from the radio.
#
# \copydetails CyberRadioDriver::IRadio::getStatus()
def getStatus(self):
if self.statQry is not None:
cmd = self.statQry(parent=self,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand )
return cmd.getResponseInfo()
else:
self.log("No status query available.")
return None
##
# \brief Gets the RF tuner status from the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTstatus()
def getTstatus(self):
if self.tstatQry is not None:
cmd = self.tstatQry(parent=self,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand )
return cmd.getResponseInfo()
else:
self.log("No tuner status query available.")
return None
##
# \brief Sets the reference mode on the radio.
#
# \copydetails CyberRadioDriver::IRadio::setReferenceMode()
def setReferenceMode(self,mode):
try:
modeInt = int(mode) if int(mode) in list(self.refModes.keys()) else None
except:
modeInt = None
if modeInt is not None and self.refCmd is not None:
self.logIfVerbose("Setting reference mode %d (%s)"%(modeInt,self.refModes.get(modeInt)))
cmd = self.refCmd(parent=self, referenceMode=modeInt,
verbose=self.verbose, logFile=self.logFile)
ret = cmd.send( self.sendCommand )
if ret and cmd.success:
self.configuration[configKeys.REFERENCE_MODE] = getattr(cmd, configKeys.REFERENCE_MODE)
return cmd.success
else:
return False
##
# \brief Sets the reference bypass mode on the radio.
#
# \copydetails CyberRadioDriver::IRadio::setBypassMode()
def setBypassMode(self,mode):
try:
modeInt = int(mode) if int(mode) in list(self.rbypModes.keys()) else None
except:
modeInt = None
if modeInt is not None and self.rbypCmd is not None:
self.logIfVerbose("Setting bypass mode %d (%s)"%(modeInt,self.rbypModes.get(modeInt)))
cmd = self.rbypCmd(parent=self, bypassMode=modeInt,
verbose=self.verbose, logFile=self.logFile)
ret = cmd.send( self.sendCommand )
if ret and cmd.success:
self.configuration[configKeys.BYPASS_MODE] = getattr(cmd, configKeys.BYPASS_MODE)
return cmd.success
else:
return False
##
# \brief Sets the time adjustment for tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::setTimeAdjustment()
def setTimeAdjustment(self, tunerIndex=None, timeAdjustValue=0):
if self.tadjCmd is not None:
success = True
for i in self._getIndexList(tunerIndex, self.tunerDict):
# cmd = self.tadjCmd(parent=self,index=i, timingAdjustment=timeAdjustValue,
# verbose=self.verbose, logFile=self.logFile)
# success &= cmd.send( self.sendCommand )
success &= self.setConfiguration( {
configKeys.CONFIG_TUNER : {
i: {
configKeys.TUNER_TIMING_ADJ: timeAdjustValue,
}
}
} )
return success
else:
return False
##
# \brief Sets the calibration frequency on the radio.
#
# \copydetails CyberRadioDriver::IRadio::setCalibrationFrequency()
def setCalibrationFrequency(self, calibFrequency=0):
if self.calfCmd is not None:
cmd = self.calfCmd(parent=self, calibFrequency=calibFrequency,
verbose=self.verbose, logFile=self.logFile)
ret = cmd.send( self.sendCommand )
if ret and cmd.success:
self.configuration[configKeys.CALIB_FREQUENCY] = getattr(cmd, configKeys.CALIB_FREQUENCY)
return cmd.success
else:
return False
##
# \brief Gets the current GPS position.
#
# \copydetails CyberRadioDriver::IRadio::getGpsPosition()
def getGpsPosition(self):
# Helper function that converts GPS coordinates from the NMEA
# format to decimal degrees
def degMinToDecimalDeg(coordinate):
# Converts from [NESW](d)ddmm.mmmm(mm) format to decimal degrees
# degDigits == number of digits used for degrees (2 for lat, 3 for lon)
# Last (decimal places + 3) characters == Minutes
ret = 0.0
# -- Get the sign from the directional indicator
sgn = (-1 if coordinate[0] in ["W", "S"] else 1)
# -- Find the decimal point position
coord = coordinate[1:]
dotPos = coord.find(".")
minLen = len(coord) - dotPos + 2
            minutes = float( coord[-minLen:] )
            deg = float( coord[:-minLen] )
            if deg < 0.0:
                ret = deg - minutes / 60.0
            else:
                ret = deg + minutes / 60.0
ret = ret * sgn
return ret
if self.gposCmd is not None:
cmd = self.gposCmd( parent=self, query=True,
verbose=self.verbose, logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
latStr = cmd.getResponseInfo().get(configKeys.GPS_LATITUDE, "N0000.000000")
lonStr = cmd.getResponseInfo().get(configKeys.GPS_LONGITUDE, "E0000.000000")
return ( degMinToDecimalDeg(latStr), degMinToDecimalDeg(lonStr) )
else:
return (0.0, 0.0)
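    # Worked example for the NMEA-style conversion above (made-up coordinate):
    # "W12258.3416" -> sign -1, 122 degrees, 58.3416 minutes
    #              -> -(122 + 58.3416/60) = -122.97236 decimal degrees.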
##
# \brief Gets the current radio temperature.
#
# \copydetails CyberRadioDriver::IRadio::getTemperature()
def getTemperature(self):
if self.tempCmd is not None:
cmd = self.tempCmd( parent=self, query=True,
verbose=self.verbose, logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
return cmd.getResponseInfo().get(configKeys.TEMPERATURE, 0)
else:
return 0
##
# \brief Gets the current GPIO output bits.
#
# \copydetails CyberRadioDriver::IRadio::getGpioOutput()
def getGpioOutput(self):
if self.gpioStaticCmd is not None:
cmd = self.gpioStaticCmd( parent=self, query=True,
verbose=self.verbose,
logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
return cmd.getResponseInfo().get(configKeys.GPIO_VALUE, 0)
else:
return 0
##
# \brief Gets the GPIO output settings for a given sequence index.
#
# \copydetails CyberRadioDriver::IRadio::getGpioOutputByIndex()
def getGpioOutputByIndex(self, index):
if self.gpioSeqCmd is not None:
cmd = self.gpioSeqCmd( parent=self, query=True,
index=index,
verbose=self.verbose,
logFile=self.logFile )
cmd.send( self.sendCommand, timeout=cmd.timeout )
return ( cmd.getResponseInfo().get(configKeys.GPIO_VALUE, 0),
cmd.getResponseInfo().get(configKeys.GPIO_DURATION, 0),
cmd.getResponseInfo().get(configKeys.GPIO_LOOP, 0) )
else:
return (0, 0, 0)
##
# \brief Sets the current GPIO output bits.
#
# \copydetails CyberRadioDriver::IRadio::setGpioOutput()
def setGpioOutput(self, value):
if self.gpioStaticCmd is not None:
cmd = self.gpioStaticCmd(parent=self,
value=value,
verbose=self.verbose, logFile=self.logFile)
ret = cmd.send( self.sendCommand )
return cmd.success
else:
return False
##
# \brief Sets the GPIO output settings for a given sequence index.
#
# \copydetails CyberRadioDriver::IRadio::setGpioOutputByIndex()
def setGpioOutputByIndex(self, index, value, duration, loop, go):
if self.gpioSeqCmd is not None:
cmd = self.gpioSeqCmd(parent=self,
index=index,
value=value,
duration=duration,
loop=loop,
go=go,
verbose=self.verbose, logFile=self.logFile)
ret = cmd.send( self.sendCommand )
return cmd.success
else:
return False
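    # Usage sketch (illustrative; the bit pattern, duration, loop, and go values
    # are arbitrary, and their units/semantics are radio-specific):
    #
    #     radio.setGpioOutput(0x0F)                      # static output bits
    #     radio.setGpioOutputByIndex(1, 0xFF, 10, 1, 1)  # sequence entry 1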
##
    # \brief Gets the current bandwidth of the given tuner.
    #
    # \copydetails CyberRadioDriver::IRadio::getTunerBandwidth()
def getTunerBandwidth(self, tuner):
if tuner not in self.getTunerIndexRange():
raise ValueError("Invalid tuner specified")
ret = self.tunerBandwidthConstant
if self.tunerBandwithSettable:
ifFilter = self.getConfigurationByKeys(
configKeys.CONFIG_TUNER,
tuner,
configKeys.TUNER_IF_FILTER
)
if ifFilter is not None:
ret = ifFilter * 1e6
return ret
##
# \brief Gets the name of the radio.
#
# \copydetails CyberRadioDriver::IRadio::getName()
@classmethod
def getName(cls):
return cls._name
##
# \brief Gets the number of tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumTuner()
@classmethod
def getNumTuner(cls):
return len(cls.getTunerIndexRange())
##
# \brief Gets the number of tuner boards on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumTunerBoards()
@classmethod
def getNumTunerBoards(cls):
return cls.numTunerBoards
##
# \brief Gets the index range for the tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerIndexRange()
@classmethod
def getTunerIndexRange(cls):
return list(range(cls.tunerIndexBase, cls.tunerIndexBase + cls.numTuner, 1))
##
# \brief Gets the frequency range for the tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerFrequencyRange()
@classmethod
def getTunerFrequencyRange(cls):
return cls.tunerType.frqRange
##
# \brief Gets the frequency resolution for tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerFrequencyRes()
@classmethod
def getTunerFrequencyRes(cls):
return cls.tunerType.frqRes
##
# \brief Gets the frequency unit for tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerFrequencyUnit()
@classmethod
def getTunerFrequencyUnit(cls):
return cls.tunerType.frqUnits
##
# \brief Gets the attenuation range for the tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerAttenuationRange()
@classmethod
def getTunerAttenuationRange(cls):
return cls.tunerType.attRange
##
# \brief Gets the attenuation resolution for tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerAttenuationRes()
@classmethod
def getTunerAttenuationRes(cls):
return cls.tunerType.attRes
##
    # \brief Gets the IF filter list for the tuners on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTunerIfFilterList()
@classmethod
def getTunerIfFilterList(cls):
return cls.tunerType.ifFilters
##
# \brief Gets whether or not the radio supports setting tuner
# bandwidth
#
# \copydetails CyberRadioDriver::IRadio::isTunerBandwidthSettable()
@classmethod
def isTunerBandwidthSettable(cls):
return cls.tunerBandwithSettable
##
# \brief Gets the number of wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumWbddc()
@classmethod
def getNumWbddc(cls):
return len(cls.getWbddcIndexRange())
##
# \brief Gets whether the DDCs on the radio have selectable sources.
#
# \copydetails CyberRadioDriver::IRadio::isDdcSelectableSource()
@classmethod
def isDdcSelectableSource(cls, wideband):
ddcType = cls.wbddcType if wideband else cls.nbddcType
return False if ddcType is None else ddcType.selectableSource
##
# \brief Gets whether the wideband or narrowband DDCs on the radio are tunable.
#
# \copydetails CyberRadioDriver::IRadio::isNbddcTunable()
@classmethod
def isDdcTunable(cls, wideband):
ddcType = cls.wbddcType if wideband else cls.nbddcType
return False if ddcType is None else ddcType.tunable
##
# \brief Gets the index range for the wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcIndexRange()
@classmethod
def getWbddcIndexRange(cls):
return list(range(cls.wbddcIndexBase, cls.wbddcIndexBase + cls.numWbddc, 1))
##
    # \brief Gets whether the wideband DDCs on the radio have selectable sources.
#
# \copydetails CyberRadioDriver::IRadio::isWbddcSelectableSource()
@classmethod
def isWbddcSelectableSource(cls):
return False if cls.wbddcType is None else cls.wbddcType.selectableSource
##
    # \brief Gets whether the wideband DDCs on the radio are tunable.
#
# \copydetails CyberRadioDriver::IRadio::isWbddcTunable()
@classmethod
def isWbddcTunable(cls):
return False if cls.wbddcType is None else cls.wbddcType.tunable
##
# \brief Gets the frequency offset range for the wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcFrequencyRange()
@classmethod
def getWbddcFrequencyRange(cls):
return (0.0,0.0) if cls.wbddcType is None else cls.wbddcType.frqRange
##
# \brief Gets the frequency offset resolution for wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcFrequencyRes()
@classmethod
def getWbddcFrequencyRes(cls):
return 0.0 if cls.wbddcType is None else cls.wbddcType.frqRes
##
# \brief Gets the allowed rate set for the wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcRateSet()
@classmethod
def getWbddcRateSet(cls, index=None):
return cls.getDdcRateSet(True, index)
##
# \brief Gets the allowed rate list for the wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcRateList()
@classmethod
def getWbddcRateList(cls, index=None):
return cls.getDdcRateList(True, index)
##
    # \brief Gets the allowed bandwidth set for the wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcBwSet()
@classmethod
def getWbddcBwSet(cls, index=None):
return cls.getDdcBwSet(True, index)
##
    # \brief Gets the allowed bandwidth list for the wideband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcBwList()
@classmethod
def getWbddcBwList(cls, index=None):
return cls.getDdcBwList(True, index)
##
# \brief Gets the number of narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumNbddc()
@classmethod
def getNumNbddc(cls):
return len(cls.getNbddcIndexRange())
##
# \brief Gets the index range for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcIndexRange()
@classmethod
def getNbddcIndexRange(cls):
if cls.numNbddc == 0:
return []
elif cls.nbddcIndexOverride is not None:
return cls.nbddcIndexOverride
else:
return list(range(cls.nbddcIndexBase, cls.nbddcIndexBase + cls.numNbddc, 1))
##
# \brief Gets whether the narrowband DDCs on the radio are tunable.
#
# \copydetails CyberRadioDriver::IRadio::isNbddcTunable()
@classmethod
def isNbddcTunable(cls):
return False if cls.nbddcType is None else cls.nbddcType.tunable
##
# \brief Gets whether the narrowband DDCs on the radio have selectable
# sources.
#
# \copydetails CyberRadioDriver::IRadio::isNbddcSelectableSource()
@classmethod
def isNbddcSelectableSource(cls):
return False if cls.nbddcType is None else cls.nbddcType.selectableSource
##
# \brief Gets the frequency offset range for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRange()
@classmethod
def getNbddcFrequencyRange(cls):
return (0.0,0.0) if cls.nbddcType is None else cls.nbddcType.frqRange
##
# \brief Gets the frequency offset resolution for narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRes()
@classmethod
def getNbddcFrequencyRes(cls):
return 0.0 if cls.nbddcType is None else cls.nbddcType.frqRes
##
# \brief Gets the allowed rate set for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcRateSet()
@classmethod
def getNbddcRateSet(cls, index=None):
return cls.getDdcRateSet(False, index)
##
# \brief Gets the allowed rate list for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcRateList()
@classmethod
def getNbddcRateList(cls, index=None):
return cls.getDdcRateList(False, index)
##
    # \brief Gets the allowed bandwidth set for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcBwSet()
@classmethod
def getNbddcBwSet(cls, index=None):
return cls.getDdcBwSet(False, index)
##
    # \brief Gets the allowed bandwidth list for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcBwList()
@classmethod
def getNbddcBwList(cls, index=None):
return cls.getDdcBwList(False, index)
##
    # \brief Gets the number of FFT streams on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumFftStream()
@classmethod
def getNumFftStream(cls):
return len(cls.getFftStreamIndexRange())
##
    # \brief Gets the index range for the FFT streams on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamIndexRange()
@classmethod
def getFftStreamIndexRange(cls):
return [] if cls.numFftStream == 0 else \
list(range(cls.fftStreamIndexBase, cls.fftStreamIndexBase + cls.numFftStream, 1))
##
# \brief Gets the allowed rate set for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamRateSet()
@classmethod
def getFftStreamRateSet(cls,):
return cls.fftStreamType.getDdcRateSet() if cls.fftStreamType is not None else {}
##
# \brief Gets the allowed rate list for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamRateList()
@classmethod
def getFftStreamRateList(cls,):
return cls.fftStreamType.getDdcRateList() if cls.fftStreamType is not None else []
##
# \brief Gets the allowed window set for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamWindowSet()
@classmethod
def getFftStreamWindowSet(cls,):
return cls.fftStreamType.getWindowSet() if cls.fftStreamType is not None else {}
##
# \brief Gets the allowed window list for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamWindowList()
@classmethod
def getFftStreamWindowList(cls,):
return sorted(cls.fftStreamType.getWindowSet().keys()) if cls.fftStreamType is not None else []
##
# \brief Gets the allowed size set for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamSizeSet()
@classmethod
def getFftStreamSizeSet(cls,):
return cls.fftStreamType.getSizeSet() if cls.fftStreamType is not None else {}
##
# \brief Gets the allowed size list for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamSizeList()
@classmethod
def getFftStreamSizeList(cls,):
return sorted(cls.fftStreamType.getSizeSet().keys()) if cls.fftStreamType is not None else []
##
# \brief Gets the ADC sample rate for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getAdcRate()
@classmethod
def getAdcRate(cls):
return cls.adcRate
##
# \brief Gets the VITA 49 header size for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaHeaderSize()
@classmethod
def getVitaHeaderSize(cls, payloadType=None):
return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).headerSizeWords
##
# \brief Gets the VITA 49 payload size for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaPayloadSize()
@classmethod
def getVitaPayloadSize(cls, payloadType=None):
return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).payloadSizeWords
##
# \brief Gets the VITA 49 tail size for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaTailSize()
@classmethod
def getVitaTailSize(cls, payloadType=None):
return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).tailSizeWords
##
# \brief Gets dictionary with information about VITA 49 framing.
#
# \copydetails CyberRadioDriver::IRadio::getVitaFrameInfoDict()
@classmethod
def getVitaFrameInfoDict(cls, payloadType=None):
return cls.ifSpecMap.get(payloadType, cls.ifSpec).getVitaFrameInfoDict()
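    # Sketch: the total VITA 49 frame size in bytes for a payload type is the
    # sum of the three per-section sizes above (assuming no other framing
    # overhead; SomeRadioClass is any handler class derived from this one):
    #
    #     frameBytes = (SomeRadioClass.getVitaHeaderSize(payloadType)
    #                   + SomeRadioClass.getVitaPayloadSize(payloadType)
    #                   + SomeRadioClass.getVitaTailSize(payloadType))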
    ##
    # \brief Gets whether data coming from the radio is byte-swapped with
# respect to the endianness of the host operating system.
#
# \copydetails CyberRadioDriver::IRadio::isByteswapped()
@classmethod
def isByteswapped(cls, payloadType=None):
return (cls.ifSpecMap.get(payloadType, cls.ifSpec).byteOrder != sys.byteorder)
##
# \brief Gets whether data coming from the radio has I and Q data swapped.
#
# \copydetails CyberRadioDriver::IRadio::isIqSwapped()
@classmethod
def isIqSwapped(cls, payloadType=None):
return cls.ifSpecMap.get(payloadType, cls.ifSpec).iqSwapped
##
# \brief Gets the byte order for data coming from the radio.
#
# \copydetails CyberRadioDriver::IRadio::getByteOrder()
@classmethod
def getByteOrder(cls, payloadType=None):
return cls.ifSpecMap.get(payloadType, cls.ifSpec).byteOrder
##
# \brief Gets the number of Gigabit Ethernet interfaces on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumGigE()
@classmethod
def getNumGigE(cls):
return len(cls.getGigEIndexRange())
##
# \brief Gets the index range for the Gigabit Ethernet interfaces on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getGigEIndexRange()
@classmethod
def getGigEIndexRange(cls):
return [] if cls.numGigE == 0 else \
list(range(cls.gigEIndexBase, cls.gigEIndexBase + cls.numGigE, 1))
##
# \brief Gets the number of destination IP address table entries available for
# each Gigabit Ethernet interface on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumGigEDipEntries()
@classmethod
def getNumGigEDipEntries(cls):
return len(cls.getGigEDipEntryIndexRange())
##
# \brief Gets the index range for the destination IP address table entries
# available for the Gigabit Ethernet interfaces on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getGigEDipEntryIndexRange()
@classmethod
def getGigEDipEntryIndexRange(cls):
return [] if cls.numGigE == 0 else \
list(range(cls.gigEDipEntryIndexBase, \
cls.gigEDipEntryIndexBase + cls.numGigEDipEntries, 1))
##
# \brief Gets the list of connection modes that the radio supports.
#
# \copydetails CyberRadioDriver::IRadio::getConnectionModeList()
@classmethod
def getConnectionModeList(cls):
return [] if cls.connectionModes is None else cls.connectionModes
##
# \brief Gets whether the radio supports a given connection mode.
#
# \copydetails CyberRadioDriver::IRadio::isConnectionModeSupported()
@classmethod
def isConnectionModeSupported(cls, mode):
return mode in cls.getConnectionModeList()
##
# \brief Gets the radio's default baud rate.
#
# \copydetails CyberRadioDriver::IRadio::getDefaultBaudrate()
@classmethod
def getDefaultBaudrate(cls):
return cls.defaultBaudrate
##
# \brief Gets the radio's default control port.
#
# \copydetails CyberRadioDriver::IRadio::getDefaultControlPort()
@classmethod
def getDefaultControlPort(cls):
return cls.defaultPort
##
# \brief Gets the allowed VITA enable options set for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaEnableOptionSet()
@classmethod
def getVitaEnableOptionSet(cls):
return {} if cls.vitaEnableOptions is None else cls.vitaEnableOptions
##
# \brief Gets the number of transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumTransmitters()
@classmethod
def getNumTransmitters(cls):
return len(cls.getTransmitterIndexRange())
##
# \brief Gets the index range for the transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterIndexRange()
@classmethod
def getTransmitterIndexRange(cls):
return [] if cls.numTxs == 0 else \
list(range(cls.txIndexBase, \
cls.txIndexBase + cls.numTxs, 1))
##
# \brief Gets the frequency range for the transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyRange()
@classmethod
def getTransmitterFrequencyRange(cls):
return (0.0,0.0) if cls.numTxs == 0 else cls.txType.frqRange
##
# \brief Gets the frequency resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyRes()
@classmethod
def getTransmitterFrequencyRes(cls):
return None if cls.numTxs == 0 else cls.txType.frqRes
##
# \brief Gets the frequency unit for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyUnit()
@classmethod
def getTransmitterFrequencyUnit(cls):
return None if cls.numTxs == 0 else cls.txType.frqUnits
##
# \brief Gets the attenuation range for the transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterAttenuationRange()
@classmethod
def getTransmitterAttenuationRange(cls):
return (0.0,0.0) if cls.numTxs == 0 else cls.txType.attRange
##
# \brief Gets the attenuation resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterAttenuationRes()
@classmethod
def getTransmitterAttenuationRes(cls):
return None if cls.numTxs == 0 else cls.txType.attRes
##
# \brief Gets whether transmitters on the radio support continuous-wave
# (CW) tone generation.
#
# \copydetails CyberRadioDriver::IRadio::transmitterSupportsCW()
@classmethod
def transmitterSupportsCW(cls):
return (cls.numTxs > 0 and issubclass(cls.txType.toneGenType,
components._cwToneGen))
##
# \brief Gets the number of CW tone generators for each transmitter.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWNum()
@classmethod
def getTransmitterCWNum(cls):
return len(cls.getTransmitterCWIndexRange())
##
# \brief Gets the CW tone generator index range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWIndexRange()
@classmethod
def getTransmitterCWIndexRange(cls):
return [] if not cls.transmitterSupportsCW() else \
list(range(cls.txType.toneGenIndexBase, \
cls.txType.toneGenIndexBase + cls.txType.numToneGen, 1))
##
# \brief Gets the CW tone generator frequency range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWFrequencyRange()
@classmethod
def getTransmitterCWFrequencyRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.frqRange
##
# \brief Gets the CW tone generator frequency resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWFrequencyRes()
@classmethod
def getTransmitterCWFrequencyRes(cls):
return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.frqRes
##
# \brief Gets the CW tone generator amplitude range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWAmplitudeRange()
@classmethod
def getTransmitterCWAmplitudeRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.ampRange
##
# \brief Gets the CW tone generator amplitude resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWAmplitudeRes()
@classmethod
def getTransmitterCWAmplitudeRes(cls):
return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.ampRes
##
# \brief Gets the CW tone generator phase range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRange()
@classmethod
def getTransmitterCWPhaseRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.phaseRange
##
# \brief Gets the CW tone generator phase resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRes()
@classmethod
def getTransmitterCWPhaseRes(cls):
return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.phaseRes
##
# \brief Gets whether transmitters on the radio support sweep functions
# during continuous-wave (CW) tone generation.
#
    # \copydetails CyberRadioDriver::IRadio::transmitterSupportsCWSweep()
@classmethod
def transmitterSupportsCWSweep(cls):
return cls.transmitterSupportsCW() and cls.txType.toneGenType.sweepCmd is not None
##
# \brief Gets the CW tone generator sweep start frequency range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStartRange()
@classmethod
def getTransmitterCWSweepStartRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.startRange
##
# \brief Gets the CW tone generator sweep start frequency resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStartRes()
@classmethod
def getTransmitterCWSweepStartRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.startRes
##
# \brief Gets the CW tone generator sweep stop frequency range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStopRange()
@classmethod
def getTransmitterCWSweepStopRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stopRange
##
# \brief Gets the CW tone generator sweep stop frequency resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStopRes()
@classmethod
def getTransmitterCWSweepStopRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stopRes
##
# \brief Gets the CW tone generator sweep step frequency range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStepRange()
@classmethod
def getTransmitterCWSweepStepRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stepRange
##
# \brief Gets the CW tone generator sweep step frequency resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStepRes()
@classmethod
def getTransmitterCWSweepStepRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stepRes
##
# \brief Gets the CW tone generator sweep dwell time range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepDwellRange()
@classmethod
def getTransmitterCWSweepDwellRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.dwellRange
##
# \brief Gets the CW tone generator sweep dwell time resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepDwellRes()
@classmethod
def getTransmitterCWSweepDwellRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.dwellRes
##
# \brief Gets the number of wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumWbduc()
@classmethod
def getNumWbduc(cls):
return len(cls.getWbducIndexRange())
##
# \brief Gets the index range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducIndexRange()
@classmethod
def getWbducIndexRange(cls):
return list(range(cls.wbducIndexBase, cls.wbducIndexBase + cls.numWbduc, 1))
##
# \brief Gets the frequency offset range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyRange()
@classmethod
def getWbducFrequencyRange(cls):
return (0.0,0.0) if cls.wbducType is None else cls.wbducType.frqRange
##
# \brief Gets the frequency resolution for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyRes()
@classmethod
def getWbducFrequencyRes(cls):
return 0.0 if cls.wbducType is None else cls.wbducType.frqRes
##
# \brief Gets the frequency unit for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyUnit()
@classmethod
def getWbducFrequencyUnit(cls):
return 0.0 if cls.wbducType is None else cls.wbducType.frqUnits
##
# \brief Gets the attenuation range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducAttenuationRange()
@classmethod
def getWbducAttenuationRange(cls):
return (0.0,0.0) if cls.wbducType is None else cls.wbducType.attRange
##
# \brief Gets the attenuation resolution for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducAttenuationRes()
@classmethod
def getWbducAttenuationRes(cls):
return 0.0 if cls.wbducType is None else cls.wbducType.attRes
##
# \brief Gets the allowed rate set for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducRateSet()
@classmethod
def getWbducRateSet(cls):
ducObj = cls.wbducType
return ducObj.rateSet if ducObj is not None else {}
##
# \brief Gets the allowed rate list for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducRateList()
@classmethod
def getWbducRateList(cls):
ducObj = cls.wbducType
if ducObj is not None:
return [ducObj.rateSet[k] for k in sorted(ducObj.rateSet.keys())]
else:
return []
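    # Illustrative sketch (comments only, not executed). If a hypothetical
    # wideband DUC type defined rateSet = {0: 102.4e6, 1: 51.2e6}, then
    # getWbducRateSet() would return that dictionary and getWbducRateList()
    # would return [102.4e6, 51.2e6], i.e. the rates ordered by rate index.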
##
# \brief Gets whether or not the wideband DUCs on the radio support loading
# sample snapshots.
#
# \copydetails CyberRadioDriver::IRadio::wbducSupportsSnapshotLoad()
@classmethod
def wbducSupportsSnapshotLoad(cls):
return (cls.wbducType is not None and cls.wbducType.snapshotLoadCmd is not None)
##
# \brief Gets whether or not the wideband DUCs on the radio support
# transmitting sample snapshots.
#
# \copydetails CyberRadioDriver::IRadio::wbducSupportsSnapshotTransmit()
@classmethod
def wbducSupportsSnapshotTransmit(cls):
return (cls.wbducType is not None and cls.wbducType.snapshotTxCmd is not None)
##
# \brief Gets the index range for the DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcGroupIndexRange()
@classmethod
def getDdcGroupIndexRange(cls, wideband):
return cls.getWbddcGroupIndexRange() if wideband else cls.getNbddcGroupIndexRange()
##
    # \brief Gets the number of wideband DDC groups on the radio.
    #
    # \copydetails CyberRadioDriver::IRadio::getNumWbddcGroups()
@classmethod
def getNumWbddcGroups(cls):
return len(cls.getWbddcGroupIndexRange())
##
# \brief Gets the index range for the wideband DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcGroupIndexRange()
@classmethod
def getWbddcGroupIndexRange(cls):
return list(range(cls.wbddcGroupIndexBase, cls.wbddcGroupIndexBase + cls.numWbddcGroups, 1))
##
    # \brief Gets the number of narrowband DDC groups on the radio.
    #
    # \copydetails CyberRadioDriver::IRadio::getNumNbddcGroups()
@classmethod
def getNumNbddcGroups(cls):
return len(cls.getNbddcGroupIndexRange())
##
# \brief Gets the index range for the narrowband DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcGroupIndexRange()
@classmethod
def getNbddcGroupIndexRange(cls):
return list(range(cls.nbddcGroupIndexBase, cls.nbddcGroupIndexBase + cls.numNbddcGroups, 1))
##
    # \brief Gets the number of combined DDC groups on the radio.
    #
    # \copydetails CyberRadioDriver::IRadio::getNumCombinedDdcGroups()
@classmethod
def getNumCombinedDdcGroups(cls):
return len(cls.getCombinedDdcGroupIndexRange())
##
# \brief Gets the index range for the combined DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getCombinedDdcGroupIndexRange()
@classmethod
def getCombinedDdcGroupIndexRange(cls):
return list(range(cls.cddcGroupIndexBase, cls.cddcGroupIndexBase + cls.numCddcGroups, 1))
##
# \brief Gets the number of wideband DUC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumWbducGroups()
@classmethod
def getNumWbducGroups(cls):
return len(cls.getWbducGroupIndexRange())
##
# \brief Gets the index range for the wideband DUC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducGroupIndexRange()
@classmethod
def getWbducGroupIndexRange(cls):
return list(range(cls.wbducGroupIndexBase, cls.wbducGroupIndexBase + cls.numWbducGroups, 1))
# ------------- Deprecated/Helper Methods ----------------- #
##
# \internal
# \brief Define this object's string representation.
def __str__(self):
return self.name
##
# \internal
# \brief Helper function that returns an index list.
def _getIndexList(self,objIndex,objDict):
if objIndex is None:
return list(objDict.keys())
elif type(objIndex) is int:
return [objIndex,] if objIndex in list(objDict.keys()) else []
elif type(objIndex) is list:
return [i for i in objIndex if i in list(objDict.keys())]
else:
return []
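    # Illustrative usage sketch (comments only, not executed). Assuming
    # "radio" is an already-connected radio handler instance, the helper
    # accepts None (all components), a single index, or a list of indices,
    # and silently drops indices the radio does not have:
    #
    #     radio._getIndexList(None, radio.tunerDict)     # every tuner index
    #     radio._getIndexList(1, radio.tunerDict)        # [1] if tuner 1 exists, else []
    #     radio._getIndexList([1, 99], radio.tunerDict)  # only the indices present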
##
# \internal
# \brief Helper function that "normalizes" an input configuration dictionary
# section by doing the following:
# <ul>
# <li> Ensuring that keys for any enumerated entries are integers
# <li> Expanding sub-dictionaries with the special "all" key
# <li> Performing specialization for individual entries
#
# \param configDict The incoming configuration dictionary.
# \param entryIndexList The list of entry indices (used in expanding "all" keys).
# \return The new configuration dictionary.
def _normalizeConfigDictSection(self, configDict, entryIndexList):
newConfigDict = {}
# Fix keys in config dictionary
convertKeys = []
invalidKeys = []
for key in configDict:
try:
tmp = int(key)
if tmp != key:
convertKeys.append(key)
except:
if key != configKeys.ALL:
invalidKeys.append(key)
for key in invalidKeys:
configDict.pop(key)
for key in convertKeys:
configDict[int(key)] = configDict.pop(key)
if configKeys.ALL in configDict:
tmpDict = configDict.pop(configKeys.ALL)
for entryNum in entryIndexList:
newConfigDict[entryNum] = copy.deepcopy(tmpDict)
for entryNum in configDict:
if entryNum in newConfigDict:
self._dictUpdate(newConfigDict[entryNum], \
configDict[entryNum], \
newConfigDict[entryNum], \
list(configDict[entryNum].keys()))
else:
newConfigDict[entryNum] = copy.deepcopy(configDict[entryNum])
return newConfigDict
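    # Illustrative sketch of the "all" key expansion (comments only, not
    # executed; the value keys shown are placeholders). With an entry index
    # list of [1, 2], an input section such as
    #
    #     { configKeys.ALL: {"frequency": 100e6}, 2: {"attenuation": 10} }
    #
    # is first copied to every entry and then specialized per entry, giving
    #
    #     { 1: {"frequency": 100e6},
    #       2: {"frequency": 100e6, "attenuation": 10} }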
##
# \internal
# \brief Helper function that "normalizes" an input configuration dictionary
# by doing the following:
# <ul>
# <li> Ensuring that keys for component enumerations are integers
# <li> Expanding sub-dictionaries with the special "all" key
# <li> Performing specialization for individual components or entries
# \param configDict The incoming configuration dictionary.
# \return The new configuration dictionary.
def _normalizeConfigDict(self, configDict):
newConfigDict = {}
for configKey in configDict:
if configKey == configKeys.CONFIG_TUNER:
newConfigDict[configKeys.CONFIG_TUNER] = self._normalizeConfigDictSection( \
configDict[configKeys.CONFIG_TUNER], \
self.tunerIndexList)
elif configKey == configKeys.CONFIG_DDC:
newConfigDict[configKeys.CONFIG_DDC] = {}
for ddcType in [configKeys.CONFIG_WBDDC, configKeys.CONFIG_NBDDC]:
isWideband = (ddcType == configKeys.CONFIG_WBDDC)
ddcConfDict = configDict[configKeys.CONFIG_DDC].get(ddcType,{})
ddcIndexRange = self.wbddcIndexList if isWideband else self.nbddcIndexList
newConfigDict[configKeys.CONFIG_DDC][ddcType] = self._normalizeConfigDictSection(\
ddcConfDict, ddcIndexRange)
elif self.numGigE > 0 and configKey == configKeys.CONFIG_IP:
tmpDict = copy.deepcopy(configDict[configKeys.CONFIG_IP])
newConfigDict[configKeys.CONFIG_IP] = self._normalizeConfigDictSection( \
tmpDict, self.gigEIndexList)
for gigEPortNum in self.gigEIndexList:
if gigEPortNum in newConfigDict[configKeys.CONFIG_IP] and \
configKeys.IP_DEST in newConfigDict[configKeys.CONFIG_IP][gigEPortNum]:
tmpDict = copy.deepcopy(newConfigDict[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST])
newConfigDict[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST] = \
self._normalizeConfigDictSection(tmpDict, \
self.gigEDipEntryIndexList)
elif self.numTxs > 0 and configKey == configKeys.CONFIG_TX:
tmpDict = copy.deepcopy(configDict[configKeys.CONFIG_TX])
newConfigDict[configKeys.CONFIG_TX] = self._normalizeConfigDictSection( \
tmpDict, \
self.txIndexList)
for txNum in self.getTransmitterIndexRange():
if txNum in newConfigDict[configKeys.CONFIG_TX]:
if configKeys.CONFIG_CW in newConfigDict[configKeys.CONFIG_TX][txNum]:
newConfigDict[configKeys.CONFIG_TX][txNum][configKeys.CONFIG_CW] = \
self._normalizeConfigDictSection( newConfigDict[configKeys.CONFIG_TX][txNum][configKeys.CONFIG_CW], \
self.txToneGenIndexList)
elif configKey == configKeys.CONFIG_DUC:
newConfigDict[configKeys.CONFIG_DUC] = {}
for ducType in [configKeys.CONFIG_WBDUC, configKeys.CONFIG_NBDUC]:
isWideband = (ducType == configKeys.CONFIG_WBDUC)
ducConfDict = configDict[configKeys.CONFIG_DUC].get(ducType,{})
ducIndexRange = self.wbducIndexList if isWideband else self.nbducIndexList
newConfigDict[configKeys.CONFIG_DUC][ducType] = self._normalizeConfigDictSection(\
ducConfDict, ducIndexRange)
pass
elif configKey == configKeys.CONFIG_DDC_GROUP:
newConfigDict[configKeys.CONFIG_DDC_GROUP] = {}
for ddcType in [configKeys.CONFIG_WBDDC_GROUP, configKeys.CONFIG_NBDDC_GROUP,
configKeys.CONFIG_COMBINED_DDC_GROUP]:
isWideband = (ddcType == configKeys.CONFIG_WBDDC_GROUP)
ddcGroupConfDict = configDict[configKeys.CONFIG_DDC_GROUP].get(ddcType,{})
ddcGroupIndexRange = self.wbddcGroupIndexList if isWideband else self.nbddcGroupIndexList
if ddcType == configKeys.CONFIG_COMBINED_DDC_GROUP:
ddcGroupIndexRange = self.cddcGroupIndexList
newConfigDict[configKeys.CONFIG_DDC_GROUP][ddcType] = self._normalizeConfigDictSection(\
ddcGroupConfDict, ddcGroupIndexRange)
elif configKey == configKeys.CONFIG_FFT:
newConfigDict[configKeys.CONFIG_FFT] = self._normalizeConfigDictSection( \
configDict[configKeys.CONFIG_FFT], \
self.fftStreamIndexList)
else:
newConfigDict[configKey] = copy.deepcopy(configDict[configKey])
return newConfigDict
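    # Illustrative sketch of the overall normalization (comments only, not
    # executed). A caller-supplied dictionary such as
    #
    #     { configKeys.CONFIG_TUNER: { configKeys.ALL: { ... } },
    #       configKeys.CONFIG_DDC: { configKeys.CONFIG_WBDDC: { "1": { ... } } } }
    #
    # comes back with the "all" entries expanded over the radio's tuner index
    # list and with string keys such as "1" converted to integers, so later
    # per-component lookups can rely on integer indices throughout.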
##
# \brief Gets the radio configuration.
#
# \deprecated Use getConfiguration() instead.
#
# \return The dictionary of radio settings.
def getAll(self):
return self.getConfiguration()
##
# \internal
# \brief Helper function for setting the tuner configuration.
#
# Deprecated in favor of setConfiguration().
def setTunerConfigurationNew(self, *args, **kwargs):
success = True
tunerIndex = kwargs.get(configKeys.TUNER_INDEX, None)
for i in self._getIndexList(tunerIndex, self.tunerDict):
success &= self.tunerDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(self.tunerDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the tuner configuration.
#
# Deprecated in favor of getConfiguration().
def getTunerConfigurationNew(self, tunerIndex=None):
config = {}
for i in self._getIndexList(tunerIndex, self.tunerDict):
config[i] = self.tunerDict[i].getConfiguration()
self.cmdErrorInfo.extend(self.tunerDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the tuner configuration.
#
# Deprecated in favor of queryConfiguration().
def queryTunerConfigurationNew(self, tunerIndex=None):
config = {}
for i in self._getIndexList(tunerIndex, self.tunerDict):
config[i] = self.tunerDict[i].queryConfiguration()
self.cmdErrorInfo.extend(self.tunerDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the DDC configuration.
#
# Deprecated in favor of setConfiguration().
def setDdcConfigurationNew(self, wideband=True, *args, **kwargs):
success = True
ddcDict = self.wbddcDict if wideband else self.nbddcDict
ddcIndex = kwargs.get(configKeys.DDC_INDEX, None)
for i in self._getIndexList(ddcIndex, ddcDict):
success &= ddcDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(ddcDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the DDC configuration.
#
# Deprecated in favor of getConfiguration().
def getDdcConfigurationNew(self, wideband=True, ddcIndex=None):
config = {}
ddcDict = self.wbddcDict if wideband else self.nbddcDict
for i in self._getIndexList(ddcIndex, ddcDict):
config[i] = ddcDict[i].getConfiguration()
self.cmdErrorInfo.extend(ddcDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the DDC configuration.
#
# Deprecated in favor of queryConfiguration().
def queryDdcConfigurationNew(self, wideband=True, ddcIndex=None):
config = {}
ddcDict = self.wbddcDict if wideband else self.nbddcDict
for i in self._getIndexList(ddcIndex, ddcDict):
config[i] = ddcDict[i].queryConfiguration()
self.cmdErrorInfo.extend(ddcDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the IP configuration.
#
# Deprecated in favor of setConfiguration().
def setIpConfigurationNew(self, confDict):
success = True
# IP configuration set -- The format of the configuration dictionary
# depends on whether the radio has Gigabit Ethernet ports on it or not.
# -- No GigE ports
if self.numGigE == 0:
for cmdClass, configKey in [ \
(self.sipCmd, configKeys.IP_SOURCE), \
(self.dipCmd, configKeys.IP_DEST), \
(self.smacCmd, configKeys.MAC_SOURCE), \
(self.dmacCmd, configKeys.MAC_DEST), \
]:
cDict = { "parent": self, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKey: confDict.get(configKey, 0)
}
if configKey in confDict and cmdClass is not None and \
cmdClass.settable:
cmd = cmdClass(**cDict)
success &= cmd.send( self.sendCommand, )
if success and cmd.success:
self.configuration[configKeys.CONFIG_IP][configKey] = \
getattr(cmd, configKey)
else:
self.cmdErrorInfo.extend(cmd.errorInfo)
pass
pass
# -- Has GigE ports
else:
for gigEPortNum in self.gigEIndexList:
if gigEPortNum in confDict:
# Set source IP address for this GigE port
if self.sipCmd is not None and self.sipCmd.settable and \
configKeys.IP_SOURCE in confDict[gigEPortNum]:
# What we do here depends on what "sourceIP" points to --
# either a string (NDR308-class) or a dictionary (NDR551-class)
if isinstance(confDict[gigEPortNum][configKeys.IP_SOURCE], str):
# Do it the NDR308 way
cDict = { "parent": self,
"verbose": self.verbose,
"logFile": self.logFile,
configKeys.GIGE_PORT_INDEX: gigEPortNum,
configKeys.IP_SOURCE: confDict[gigEPortNum][configKeys.IP_SOURCE],
}
cmd = self.sipCmd(**cDict)
success &= cmd.send( self.sendCommand, )
if success and cmd.success:
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE] = \
getattr(cmd, configKeys.IP_SOURCE)
else:
self.cmdErrorInfo.extend(cmd.errorInfo)
else:
# Do it the NDR551 way
cDict = { "parent": self,
"verbose": self.verbose,
"logFile": self.logFile,
configKeys.GIGE_PORT_INDEX: gigEPortNum,
}
if configKeys.GIGE_MAC_ADDR in confDict[gigEPortNum][configKeys.IP_SOURCE]:
cDict[configKeys.GIGE_MAC_ADDR] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_MAC_ADDR]
if configKeys.GIGE_IP_ADDR in confDict[gigEPortNum][configKeys.IP_SOURCE]:
cDict[configKeys.GIGE_IP_ADDR] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_IP_ADDR]
if configKeys.GIGE_NETMASK in confDict[gigEPortNum][configKeys.IP_SOURCE]:
cDict[configKeys.GIGE_NETMASK] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_NETMASK]
if configKeys.GIGE_SOURCE_PORT in confDict[gigEPortNum][configKeys.IP_SOURCE]:
cDict[configKeys.GIGE_SOURCE_PORT] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_SOURCE_PORT]
cmd = self.sipCmd(**cDict)
success &= cmd.send( self.sendCommand, )
if success and cmd.success:
#self.logIfVerbose("[setIpConfigurationNew()] cmd attributes = %s" % \
# cmd.attributeDump())
if configKeys.GIGE_MAC_ADDR in cmd.__dict__:
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_MAC_ADDR] = \
getattr(cmd, configKeys.GIGE_MAC_ADDR)
if configKeys.GIGE_IP_ADDR in cmd.__dict__:
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_IP_ADDR] = \
getattr(cmd, configKeys.GIGE_IP_ADDR)
if configKeys.GIGE_NETMASK in cmd.__dict__:
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_NETMASK] = \
getattr(cmd, configKeys.GIGE_NETMASK)
if configKeys.GIGE_SOURCE_PORT in cmd.__dict__:
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_SOURCE_PORT] = \
getattr(cmd, configKeys.GIGE_SOURCE_PORT)
else:
if cmd.errorInfo is not None:
self.cmdErrorInfo.extend(cmd.errorInfo)
# Set destination IP table info for this GigE port
if self.dipCmd is not None and self.dipCmd.settable and \
configKeys.IP_DEST in confDict[gigEPortNum]:
for gigEDipEntryNum in self.gigEDipEntryIndexList:
if gigEDipEntryNum in confDict[gigEPortNum][configKeys.IP_DEST]:
cDict = { "parent": self, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKeys.GIGE_PORT_INDEX: gigEPortNum, \
configKeys.GIGE_DIP_INDEX: gigEDipEntryNum, \
}
keys = [configKeys.GIGE_IP_ADDR, configKeys.GIGE_MAC_ADDR, \
configKeys.GIGE_SOURCE_PORT, configKeys.GIGE_DEST_PORT, \
configKeys.GIGE_ARP]
self._dictUpdate(cDict, \
confDict[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum], \
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum], \
keys)
# Don't send along MAC address if there is an ARP setting
# and the ARP setting is True. This prevents errors being
# triggered on radios with less permissive configurations
# (like the NDR551).
if configKeys.GIGE_ARP in cDict and cDict[configKeys.GIGE_ARP]:
cDict.pop(configKeys.GIGE_MAC_ADDR, None)
cmd = self.dipCmd(**cDict)
success &= cmd.send( self.sendCommand, )
if success and cmd.success:
for key in keys:
if hasattr(cmd, key):
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][key] = \
getattr(cmd, key)
else:
if cmd.errorInfo is not None:
self.cmdErrorInfo.extend(cmd.errorInfo)
pass
# Set flow control for this GigE port
if self.tgfcCmd is not None and self.tgfcCmd.settable and \
configKeys.GIGE_FLOW_CONTROL in confDict[gigEPortNum]:
cDict = { "parent": self, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKeys.GIGE_PORT_INDEX: gigEPortNum, \
configKeys.GIGE_FLOW_CONTROL: confDict[gigEPortNum][configKeys.GIGE_FLOW_CONTROL], \
}
cmd = self.tgfcCmd(**cDict)
success &= cmd.send( self.sendCommand, )
if success and cmd.success:
self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.GIGE_FLOW_CONTROL] = \
getattr(cmd, configKeys.GIGE_FLOW_CONTROL)
else:
if cmd.errorInfo is not None:
self.cmdErrorInfo.extend(cmd.errorInfo)
pass
return success
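    # Illustrative sketch of the per-port dictionary this helper expects on
    # radios with Gigabit Ethernet ports (comments only, not executed; the
    # port index and addresses are placeholders):
    #
    #     confDict = {
    #         1: {                                        # GigE port index
    #             configKeys.IP_SOURCE: "192.168.0.10",   # NDR308-style (string)
    #             configKeys.IP_DEST: {
    #                 0: {                                # DIP table entry index
    #                     configKeys.GIGE_IP_ADDR: "192.168.0.1",
    #                     configKeys.GIGE_DEST_PORT: 41000,
    #                 },
    #             },
    #         },
    #     }
    #
    # On NDR551-class radios the IP_SOURCE value is itself a dictionary with
    # GIGE_MAC_ADDR / GIGE_IP_ADDR / GIGE_NETMASK / GIGE_SOURCE_PORT keys, as
    # handled by the branch above.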
##
# \internal
# \brief Helper function for querying the IP configuration.
# \param gigEPortIndex 10-Gig data port index, or None to query all data ports.
def queryIpConfigurationNew(self, gigEPortIndex=None):
# IP configuration query -- The format of this section depends on whether
# the radio has Gigabit Ethernet ports on it or not.
ret = {}
# -- No GigE ports
if self.numGigE == 0:
ret = self._queryIpConfigurationNo10Gig()
# -- Has GigE ports
else:
ret = self._queryIpConfiguration10Gig(gigEPortIndex=gigEPortIndex)
return ret
##
# \internal
# \brief Helper function for querying the IP configuration for radios without
# 10-Gig Ethernet interfaces.
def _queryIpConfigurationNo10Gig(self):
ret = {}
for cmdClass, configKey in [ \
(self.sipCmd, configKeys.IP_SOURCE), \
(self.dipCmd, configKeys.IP_DEST), \
(self.smacCmd, configKeys.MAC_SOURCE), \
(self.dmacCmd, configKeys.MAC_DEST), \
]:
ret[configKey] = None
if cmdClass is not None and cmdClass.queryable:
cmd = cmdClass(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
ret[configKey] = rspInfo.get(configKey, "")
return ret
##
# \internal
# \brief Helper function for querying the IP configuration for radios with
# 10-Gig Ethernet interfaces.
# \param gigEPortIndex 10-Gig data port index, or None to query all data ports.
def _queryIpConfiguration10Gig(self, gigEPortIndex=None):
ret = {}
gigEPortIndexRange = self.getGigEIndexRange() if gigEPortIndex is None else [gigEPortIndex]
for gigEPortNum in gigEPortIndexRange:
ret[gigEPortNum] = {}
# Query source IP address for this GigE port
if self.sipCmd is not None and self.sipCmd.queryable:
# Default source IP info
if self.json:
ret[gigEPortNum][configKeys.IP_SOURCE] = {
configKeys.GIGE_MAC_ADDR: None,
configKeys.GIGE_IP_ADDR: None,
configKeys.GIGE_NETMASK: None,
configKeys.GIGE_SOURCE_PORT: None,
}
else:
ret[gigEPortNum][configKeys.IP_SOURCE] = None
cDict = { "parent": self, \
"query": True, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKeys.GIGE_PORT_INDEX: gigEPortNum, \
}
cmd = self.sipCmd(**cDict)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
# How to parse this depends on whether the radio is JSON or not
if self.json:
# Do it NDR551-style
ret[gigEPortNum][configKeys.IP_SOURCE] = {}
ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_MAC_ADDR] = \
rspInfo.get(configKeys.GIGE_MAC_ADDR, "")
ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_IP_ADDR] = \
rspInfo.get(configKeys.GIGE_IP_ADDR, "")
ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_NETMASK] = \
rspInfo.get(configKeys.GIGE_NETMASK, "")
ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_SOURCE_PORT] = \
rspInfo.get(configKeys.GIGE_SOURCE_PORT, 0)
else:
# Do it NDR308-style
ret[gigEPortNum][configKeys.IP_SOURCE] = \
rspInfo.get(configKeys.IP_SOURCE, "")
# Query destination IP table for this GigE port
if self.dipCmd is not None and self.dipCmd.queryable:
ret[gigEPortNum][configKeys.IP_DEST] = {}
for gigEDipEntryNum in self.gigEDipEntryIndexList:
ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum] = {}
cmd = self.dipCmd(**{})
for configKey in [configKeys.GIGE_IP_ADDR, \
configKeys.GIGE_MAC_ADDR, \
configKeys.GIGE_SOURCE_PORT, \
configKeys.GIGE_DEST_PORT, \
configKeys.GIGE_ARP]:
if hasattr(cmd, "queryParamMap") and configKey in cmd.queryParamMap:
ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][configKey] = None
elif hasattr(cmd, "queryResponseData") and configKey in [q[0] for q in cmd.queryResponseData]:
ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][configKey] = None
cDict = { "parent": self, \
"query": True, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKeys.GIGE_PORT_INDEX: gigEPortNum, \
configKeys.GIGE_DIP_INDEX: gigEDipEntryNum, \
}
cmd = self.dipCmd(**cDict)
cmd.send( self.sendCommand, )
rspInfo = cmd.getResponseInfo()
self._addLastCommandErrorInfo(cmd)
if rspInfo is not None:
for configKey in [configKeys.GIGE_IP_ADDR, \
configKeys.GIGE_MAC_ADDR, \
configKeys.GIGE_SOURCE_PORT, \
configKeys.GIGE_DEST_PORT, \
configKeys.GIGE_ARP]:
if configKey in rspInfo:
ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][configKey] = \
rspInfo[configKey]
return ret
##
# \internal
# \brief Helper function for setting the transmitter configuration.
#
# Deprecated in favor of setConfiguration().
def setTxConfigurationNew(self, *args, **kwargs):
success = True
txIndex = kwargs.get(configKeys.TX_INDEX, None)
for i in self._getIndexList(txIndex, self.txDict):
success &= self.txDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(self.txDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the transmitter configuration.
#
# Deprecated in favor of getConfiguration().
def getTxConfigurationNew(self, txIndex=None):
config = {}
for i in self._getIndexList(txIndex, self.txDict):
config[i] = self.txDict[i].getConfiguration()
self.cmdErrorInfo.extend(self.txDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the transmitter configuration.
#
    # Deprecated in favor of queryConfiguration().
def queryTxConfigurationNew(self, txIndex=None):
config = {}
for i in self._getIndexList(txIndex, self.txDict):
config[i] = self.txDict[i].queryConfiguration()
self.cmdErrorInfo.extend(self.txDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the DUC configuration.
#
# Deprecated in favor of setConfiguration().
def setDucConfigurationNew(self, wideband=True, *args, **kwargs):
success = True
ducDict = self.wbducDict if wideband else self.nbducDict
ducIndex = kwargs.get(configKeys.DUC_INDEX, None)
for i in self._getIndexList(ducIndex, ducDict):
success &= ducDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(ducDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the DUC configuration.
#
# Deprecated in favor of getConfiguration().
def getDucConfigurationNew(self, wideband=True, ducIndex=None):
config = {}
ducDict = self.wbducDict if wideband else self.nbducDict
for i in self._getIndexList(ducIndex, ducDict):
config[i] = ducDict[i].getConfiguration()
self.cmdErrorInfo.extend(ducDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the DUC configuration.
#
    # Deprecated in favor of queryConfiguration().
def queryDucConfigurationNew(self, wideband=True, ducIndex=None):
config = {}
ducDict = self.wbducDict if wideband else self.nbducDict
for i in self._getIndexList(ducIndex, ducDict):
config[i] = ducDict[i].queryConfiguration()
self.cmdErrorInfo.extend(ducDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for getting the DDC group configuration.
#
# Deprecated in favor of getConfiguration().
def getDdcGroupConfigurationNew(self, wideband=True, ddcGroupIndex=None):
config = {}
ddcGroupDict = self.wbddcGroupDict if wideband else self.nbddcGroupDict
for i in self._getIndexList(ddcGroupIndex, ddcGroupDict):
config[i] = ddcGroupDict[i].getConfiguration()
self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the DDC group configuration.
#
# Deprecated in favor of queryConfiguration().
def queryDdcGroupConfigurationNew(self, wideband=True, ddcGroupIndex=None):
config = {}
ddcGroupDict = self.wbddcGroupDict if wideband else self.nbddcGroupDict
for i in self._getIndexList(ddcGroupIndex, ddcGroupDict):
config[i] = ddcGroupDict[i].queryConfiguration()
self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the DDC group configuration.
#
# Deprecated in favor of setConfiguration().
def setDdcGroupConfigurationNew(self, wideband=True, *args, **kwargs):
success = True
ddcGroupDict = self.wbddcGroupDict if wideband else self.nbddcGroupDict
ddcGroupIndex = kwargs.get(configKeys.INDEX, None)
for i in self._getIndexList(ddcGroupIndex, ddcGroupDict):
success &= ddcGroupDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the combined DDC group configuration.
#
# Deprecated in favor of getConfiguration().
def getCombinedDdcGroupConfigurationNew(self, ddcGroupIndex=None):
config = {}
ddcGroupDict = self.cddcGroupDict
for i in self._getIndexList(ddcGroupIndex, ddcGroupDict):
config[i] = ddcGroupDict[i].getConfiguration()
self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the combined DDC group configuration.
#
# Deprecated in favor of queryConfiguration().
def queryCombinedDdcGroupConfigurationNew(self, ddcGroupIndex=None):
config = {}
ddcGroupDict = self.cddcGroupDict
for i in self._getIndexList(ddcGroupIndex, ddcGroupDict):
config[i] = ddcGroupDict[i].queryConfiguration()
self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the combined DDC group configuration.
#
# Deprecated in favor of setConfiguration().
def setCombinedDdcGroupConfigurationNew(self, *args, **kwargs):
success = True
#self.logIfVerbose("[ndr551][setCombinedDdcGroupConfigurationNew()] begin")
ddcGroupDict = self.cddcGroupDict
ddcGroupIndex = kwargs.get(configKeys.INDEX, None)
for i in self._getIndexList(ddcGroupIndex, ddcGroupDict):
success &= ddcGroupDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo())
#self.logIfVerbose("[ndr551][setCombinedDdcGroupConfigurationNew()] end")
return success
##
# \internal
# \brief Helper function for getting the DUC group configuration.
#
# Deprecated in favor of getConfiguration().
def getDucGroupConfigurationNew(self, wideband=True, ducGroupIndex=None):
config = {}
ducGroupDict = self.wbducGroupDict if wideband else self.nbducGroupDict
for i in self._getIndexList(ducGroupIndex, ducGroupDict):
config[i] = ducGroupDict[i].getConfiguration()
self.cmdErrorInfo.extend(ducGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the DUC group configuration.
#
# Deprecated in favor of queryConfiguration().
def queryDucGroupConfigurationNew(self, wideband=True, ducGroupIndex=None):
config = {}
ducGroupDict = self.wbducGroupDict if wideband else self.nbducGroupDict
for i in self._getIndexList(ducGroupIndex, ducGroupDict):
config[i] = ducGroupDict[i].queryConfiguration()
self.cmdErrorInfo.extend(ducGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the DUC group configuration.
#
# Deprecated in favor of setConfiguration().
def setDucGroupConfigurationNew(self, wideband=True, *args, **kwargs):
success = True
ducGroupDict = self.wbducGroupDict if wideband else self.nbducGroupDict
ducGroupIndex = kwargs.get(configKeys.INDEX, None)
for i in self._getIndexList(ducGroupIndex, ducGroupDict):
success &= ducGroupDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(ducGroupDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the tuner group configuration.
#
# Deprecated in favor of getConfiguration().
def getTunerGroupConfigurationNew(self, tunerGroupIndex=None):
config = {}
for i in self._getIndexList(tunerGroupIndex, self.tunerGroupDict):
config[i] = self.tunerGroupDict[i].getConfiguration()
self.cmdErrorInfo.extend(self.tunerGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the tuner group configuration.
#
# Deprecated in favor of queryConfiguration().
def queryTunerGroupConfigurationNew(self, tunerGroupIndex=None):
config = {}
for i in self._getIndexList(tunerGroupIndex, self.tunerGroupDict):
config[i] = self.tunerGroupDict[i].queryConfiguration()
self.cmdErrorInfo.extend(self.tunerGroupDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for setting the tuner group configuration.
#
# Deprecated in favor of setConfiguration().
def setTunerGroupConfigurationNew(self, *args, **kwargs):
success = True
tunerGroupIndex = kwargs.get(configKeys.INDEX, None)
for i in self._getIndexList(tunerGroupIndex, self.tunerGroupDict):
success &= self.tunerGroupDict[i].setConfiguration(*args, **kwargs)
self.cmdErrorInfo.extend(self.tunerGroupDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for setting the FFT stream configuration.
#
# Deprecated in favor of setConfiguration().
#
def setFftStreamConfiguration(self, *args, **kwargs):
success = True
index = kwargs.get(configKeys.FFT_INDEX, None)
for i in self._getIndexList(index, self.fftStreamDict):
success &= self.fftStreamDict[i].setConfiguration(**kwargs)
self.cmdErrorInfo.extend(self.fftStreamDict[i].getLastCommandErrorInfo())
return success
##
# \internal
# \brief Helper function for getting the FFT stream configuration.
#
# Deprecated in favor of getConfiguration().
def getFftStreamConfiguration(self, fftStreamIndex=None):
config = {}
for i in self._getIndexList(fftStreamIndex, self.fftStreamDict):
config[i] = self.fftStreamDict[i].getConfiguration()
self.cmdErrorInfo.extend(self.fftStreamDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for querying the FFT stream configuration.
#
# Deprecated in favor of queryConfiguration().
def queryFftStreamConfiguration(self, fftStreamIndex=None):
config = {}
for i in self._getIndexList(fftStreamIndex, self.fftStreamDict):
config[i] = self.fftStreamDict[i].queryConfiguration()
self.cmdErrorInfo.extend(self.fftStreamDict[i].getLastCommandErrorInfo())
return config
##
# \internal
# \brief Helper function for configuring the IP addresses.
def configureIp(self,iface,udpBase=41000,maxUdp=None):
success = True
self.logIfVerbose( "configureIP CALLED" )
if type(iface) is list and len(iface)>1:
self.logIfVerbose( "configuring dual interfaces %s"%repr(iface) )
maxUdp = 32
udpList = []
if type(udpBase) in (int,float):
udpBase = [udpBase,udpBase]
elif type(udpBase) is list:
if len(udpBase)==1:
udpBase.append(udpBase[0])
for index,interface in enumerate(iface):
udpList.append( list(range(udpBase[index]+index*100,udpBase[index]+maxUdp+index*100)) )
mac,dip = getInterfaceAddresses(iface[index])
x = [ int(i) for i in dip.split(".") ]
x[-1]+=10
sip = ".".join( [str(i) for i in x] )
sipCmd = command.radio_command( parent=self, cmdString="SIP %d,%s"%(index+1,sip),
verbose=self.verbose, logFile=self.logFile )
success &= sipCmd.send( self.sendCommand )
for i in range(maxUdp):
args = ", ".join( [str(i) for i in (index+1,i,dip,mac,udpList[index][i],udpList[index][i])] )
dipCmd = command.radio_command( parent=self, cmdString="DIP %s"%args,
verbose=self.verbose, logFile=self.logFile )
success &= dipCmd.send( self.sendCommand )
else:
self.logIfVerbose("configuring single interface %s"%repr(iface))
if type(iface) is list:
iface = iface[0]
if maxUdp is None:
maxUdp = self.numWbddc+self.numNbddc
self.udpList = [list(range(udpBase,udpBase+maxUdp)),]
mac,dip = getInterfaceAddresses(iface)
x = [ int(i) for i in dip.split(".") ]
x[-1]+=10
sip = ".".join( [str(i) for i in x] )
for cmd in ( command.radio_command(parent=self, cmdString="SIP %s"%sip,
verbose=self.verbose, logFile=self.logFile), \
command.radio_command(parent=self, cmdString="DIP %s"%dip,
verbose=self.verbose, logFile=self.logFile), \
command.radio_command(parent=self, cmdString="TDMAC %s"%mac,
verbose=self.verbose, logFile=self.logFile), \
):
success &= cmd.send( self.sendCommand )
return success
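    # Illustrative usage sketch (comments only, not executed). Assuming
    # "radio" is a connected radio handler and "eth1"/"eth2" are placeholder
    # local interface names:
    #
    #     radio.configureIp("eth1", udpBase=41000)                      # single interface
    #     radio.configureIp(["eth1", "eth2"], udpBase=[41000, 42000])   # dual interface
    #
    # The helper derives the radio's source IP by adding 10 to the last
    # octet of the interface's own address.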
##
# \brief Gets the number of DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumDdc()
@classmethod
def getNumDdc(cls, wideband):
return len(cls.getDdcIndexRange(wideband))
##
# \brief Gets the allowed rate set for the DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getDdcRateSet()
@classmethod
def getDdcRateSet(cls, wideband, index=None):
ddcObj = cls.wbddcType if wideband else cls.nbddcType
return ddcObj.getDdcRateSet(index) if ddcObj is not None else {}
##
# \brief Gets the allowed rate list for the DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getDdcRateList()
@classmethod
def getDdcRateList(cls, wideband, index=None):
ddcObj = cls.wbddcType if wideband else cls.nbddcType
return ddcObj.getDdcRateList(index) if ddcObj is not None else []
##
# \brief Gets the allowed bandwidth set for the DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getDdcBwSet()
@classmethod
def getDdcBwSet(cls, wideband, index=None):
ddcObj = cls.wbddcType if wideband else cls.nbddcType
return ddcObj.getDdcBwSet(index) if ddcObj is not None else {}
##
# \brief Gets the allowed bandwidth list for the DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getDdcBwList()
@classmethod
def getDdcBwList(cls, wideband, index=None):
ddcObj = cls.wbddcType if wideband else cls.nbddcType
return ddcObj.getDdcBwList(index) if ddcObj is not None else []
##
# \brief Gets the set of available DDC data formats.
#
# \copydetails CyberRadioDriver::IRadio::getDdcDataFormat()
@classmethod
def getDdcDataFormat(cls, wideband):
ddcObj = cls.wbddcType if wideband else cls.nbddcType
return ddcObj.getDdcDataFormat() if ddcObj is not None else {}
##
    # \brief Gets the frequency offset range for the DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRange()
@classmethod
def getDdcFrequencyRange(cls, wideband, index=None):
ddcType = cls.wbddcType if wideband else cls.nbddcType
return (0.0,0.0) if ddcType is None else ddcType.frqRange
##
# \brief Gets the list of DDC indexes for a specified type.
#
# \copydetails CyberRadioDriver::IRadio::getDdcIndexRange()
@classmethod
def getDdcIndexRange(cls, wideband):
return cls.getWbddcIndexRange() if wideband else cls.getNbddcIndexRange()
##
# \internal
# \brief Convenience method for configuring the Ethernet addresses on a radio that does not
# have Gigabit Ethernet ports.
#
# \param sip The source IP address. If this is None, the source IP address will not
# be changed.
# \param dip The destination IP address. If this is None, the destination IP address
# will not be changed.
# \param dmac The destination MAC address. If this is None, the destination MAC address
# will not be changed.
# \return True if the configuration succeeded, False otherwise.
def setIpConfiguration(self, sip=None, dip=None, dmac=None):
configDict = {
configKeys.CONFIG_IP: {
}
}
if sip is not None:
configDict[configKeys.CONFIG_IP][configKeys.IP_SOURCE] = copy.deepcopy(sip)
if dip is not None:
configDict[configKeys.CONFIG_IP][configKeys.IP_DEST] = copy.deepcopy(dip)
if dmac is not None:
configDict[configKeys.CONFIG_IP][configKeys.MAC_DEST] = copy.deepcopy(dmac)
return self._setConfiguration(configDict)
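    # Illustrative usage sketch (comments only, not executed; the addresses
    # are placeholders). For a radio without Gigabit Ethernet ports:
    #
    #     radio.setIpConfiguration(sip="192.168.0.10",
    #                              dip="192.168.0.1",
    #                              dmac="00:11:22:33:44:55")
    #
    # Any argument left as None keeps the radio's current setting.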
##
# \internal
def setDip(self,udp,dip="255.255.255.255",dmac="ff:ff:ff:ff:ff:ff",ifIndex=None,subIndex=None):
pass
##
# \internal
# \brief Sets tuner configuration (old-style).
#
# \deprecated Use setConfiguration() instead.
#
# \param frequency Tuner frequency.
# \param attenuation Tuner attenuation.
# \param tunerIndex Either None (configure all tuners), an index number (configure
# a specific tuner), or a list of index numbers (configure a set of tuners).
# \return True if successful, False otherwise.
def setTunerConfiguration(self,frequency,attenuation,tunerIndex=None):
success = True
for i in self._getIndexList(tunerIndex, self.tunerDict):
# success &= self.tunerDict[i].setConfiguration(frequency,attenuation)
success &= self.tunerDict[i].setConfiguration( **{
configKeys.TUNER_FREQUENCY: frequency,
configKeys.TUNER_ATTENUATION: attenuation,
} )
return success
##
# \internal
# \brief Gets tuner configuration (old-style).
#
# \deprecated Use getConfiguration() instead.
#
# \param tunerIndex Either None (get for all tuners), an index number (get for
# a specific tuner), or a list of index numbers (get for a set of tuners).
# \return A dictionary with configuration information.
def getTunerConfiguration(self,tunerIndex=None):
config = {}
for i in self._getIndexList(tunerIndex, self.tunerDict):
config[i] = self.tunerDict[i].getConfiguration()
return config
##
# \internal
# \brief Sets tuner frequency (old-style).
#
# \deprecated Use setConfiguration() instead.
#
# \param frequency Tuner frequency.
# \param tunerIndex Either None (configure all tuners), an index number (configure
# a specific tuner), or a list of index numbers (configure a set of tuners).
# \return True if successful, False otherwise.
def setTunerFrequency(self,frequency,tunerIndex=None):
success = True
for i in self._getIndexList(tunerIndex, self.tunerDict):
# success &= self.tunerDict[i].setFrequency(frequency)
success &= self.tunerDict[i].setConfiguration( **{
configKeys.TUNER_FREQUENCY: frequency,
} )
return success
##
# \internal
# \brief Gets tuner frequency information (old-style).
#
# \deprecated Use getConfiguration() instead.
#
# \param tunerIndex Either None (get for all tuners), an index number (get for
# a specific tuner), or a list of index numbers (get for a set of tuners).
# \return A dictionary with frequency information.
def getTunerFrequency(self,tunerIndex=None,):
frqDict = {}
for i in self._getIndexList(tunerIndex, self.tunerDict):
#frqDict[i] = self.tunerDict[i].getFrequency()
frqDict[i] = self.tunerDict[i].configuration.get(configKeys.TUNER_FREQUENCY, None)
return frqDict
##
# \internal
# \brief Sets tuner attenuation (old-style).
#
# \deprecated Use setConfiguration() instead.
#
# \param attenuation Tuner attenuation.
# \param tunerIndex Either None (configure all tuners), an index number (configure
# a specific tuner), or a list of index numbers (configure a set of tuners).
# \return True if successful, False otherwise.
def setTunerAttenuation(self,attenuation,tunerIndex=None):
success = True
for i in self._getIndexList(tunerIndex, self.tunerDict):
# success &= self.tunerDict[i].setAttenuation(attenuation)
success &= self.tunerDict[i].setConfiguration( **{
configKeys.TUNER_ATTENUATION: attenuation,
} )
return success
##
# \internal
# \brief Gets tuner attenuation information (old-style).
#
# \deprecated Use getConfiguration() instead.
#
# \param tunerIndex Either None (get for all tuners), an index number (get for
# a specific tuner), or a list of index numbers (get for a set of tuners).
# \return A dictionary with attenuation information.
def getTunerAttenuation(self,tunerIndex=None,):
att = {}
for i in self._getIndexList(tunerIndex, self.tunerDict):
# att[i] = self.tunerDict[i].getAttenuation()
att[i] = self.tunerDict[i].configuration.get(configKeys.TUNER_ATTENUATION, None)
return att
##
# \internal
# \brief Sets DDC configuration (old-style).
#
# \deprecated Use setConfiguration() instead.
#
# \param wideband Whether the DDC is a wideband DDC.
# \param ddcIndex Either None (configure all DDCs), an index number (configure
# a specific DDC), or a list of index numbers (configure a set of DDCs).
# \param rfIndex DDC RF index number.
# \param rateIndex DDC rate index number.
# \param udpDest UDP destination.
# \param frequency Frequency offset.
# \param enable 1 if DDC is enabled, 0 if not.
# \param vitaEnable VITA 49 streaming option, as appropriate for the radio.
# \param streamId VITA 49 stream ID.
# \return True if successful, False otherwise.
def setDdcConfiguration(self,wideband,ddcIndex=None,rfIndex=1,rateIndex=0,udpDest=0,frequency=0,enable=0,vitaEnable=0,streamId=0):
success = True
ddcDict = self.wbddcDict if wideband else self.nbddcDict
for i in self._getIndexList(ddcIndex,ddcDict):
# ddcDict[i].setConfiguration(rfIndex=rfIndex,rateIndex=rateIndex,udpDest=udpDest,frequency=frequency,enable=enable,vitaEnable=vitaEnable,streamId=streamId)
success &= ddcDict[i].setConfiguration( **{
configKeys.NBDDC_RF_INDEX: rfIndex,
configKeys.DDC_RATE_INDEX: rateIndex,
configKeys.DDC_UDP_DESTINATION: udpDest,
configKeys.DDC_FREQUENCY_OFFSET: frequency,
configKeys.ENABLE: enable,
configKeys.DDC_VITA_ENABLE: vitaEnable,
configKeys.DDC_STREAM_ID: streamId,
} )
return success
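    # Illustrative sketch of the old-style call (comments only, not executed;
    # values are placeholders):
    #
    #     radio.setDdcConfiguration(wideband=True, ddcIndex=1, rateIndex=0,
    #                               udpDest=0, enable=1)
    #
    # which is roughly equivalent to calling the wideband DDC component's
    # setConfiguration() with configKeys.DDC_RATE_INDEX,
    # configKeys.DDC_UDP_DESTINATION and configKeys.ENABLE set accordingly.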
##
    # \brief Disables Ethernet flow control on the radio.
#
# \copydetails CyberRadioDriver::IRadio::disableTenGigFlowControl()
def disableTenGigFlowControl(self,):
return self.setTenGigFlowControlStatus(False)
##
    # \brief Enables Ethernet flow control on the radio.
#
# \copydetails CyberRadioDriver::IRadio::enableTenGigFlowControl()
def enableTenGigFlowControl(self,):
return self.setTenGigFlowControlStatus(True)
##
    # \brief Enables or disables Ethernet flow control on the radio.
    #
    # \copydetails CyberRadioDriver::IRadio::setTenGigFlowControlStatus()
def setTenGigFlowControlStatus(self,enable=False):
return False
##
# \brief Queries status of flow control handling.
#
# \copydetails CyberRadioDriver::IRadio::getTenGigFlowControlStatus()
def getTenGigFlowControlStatus(self,):
return {}
##
# \brief Performs coherent tuning.
#
# \copydetails CyberRadioDriver::IRadio::coherentTune()
def coherentTune(self, cohGroup, freq):
ret = True
if self.cohTuneCmd is not None:
cDict = { "parent": self, \
"verbose": self.verbose, \
"logFile": self.logFile, \
configKeys.TUNER_COHERENT_GROUP: cohGroup,
configKeys.TUNER_FREQUENCY: freq,
}
cmd = self.cohTuneCmd(**cDict)
ret &= cmd.send( self.sendCommand, )
self.logIfVerbose("coherentTune send result =", ret)
ret &= cmd.success
self.logIfVerbose("coherentTune success result =", ret)
self._addLastCommandErrorInfo(cmd)
if ret:
self.logIfVerbose("force tuner requery")
self.queryTunerConfigurationNew(tunerIndex=None)
pass
else:
ret = False
return ret
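    # Illustrative usage sketch (comments only, not executed). Assuming the
    # radio supports coherent tuning (its cohTuneCmd is defined), tuning
    # coherent group 1 might look like:
    #
    #     radio.coherentTune(1, freq)   # freq in the units the radio expects
    #
    # On success, the tuner configuration is re-queried so that the cached
    # tuner frequencies reflect the new setting.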
##
# \brief Gets the current FPGA state.
#
# \copydetails CyberRadioDriver::IRadio::getFpgaState()
def getFpgaState(self):
ret = None
if self.fpgaStateCmd is not None:
ret = self.getConfigurationByKeys("fpgaState")
return ret
##
# \brief Sets the current FPGA state.
#
# \copydetails CyberRadioDriver::IRadio::setFpgaState()
def setFpgaState(self, state):
ret = False
if self.fpgaStateCmd is not None:
ret = self.setConfiguration({"fpgaState": state})
return ret
# OVERRIDE
##
# \brief Sets whether or not the object is in verbose mode.
#
# \copydetails CyberRadioDriver::IRadio::setVerbose()
def setVerbose(self, verbose):
# Set this object's verbose mode
log._logger.setVerbose(self, verbose)
# Set verbose mode on all components
for obj in self.componentList:
obj.setVerbose(verbose)
##
# \brief Sets the log file.
#
# \copydetails CyberRadioDriver::IRadio::setLogFile()
def setLogFile(self, logFile):
# Set this object's log file
log._logger.setLogFile(self, logFile)
# Set log file on all components
for obj in self.componentList:
obj.setLogFile(logFile)
##
# \brief Gets the list of connected data port interface indices.
#
# \copydetails CyberRadioDriver::IRadio::getConnectedDataPorts()
def getConnectedDataPorts(self):
ret = []
if self.isCrddConnection:
ret = self._crddGetConnectedDataPortIndices()
return ret
##
# \internal
# \brief Converts a user-specified time string into a number of seconds
# since 1/1/70.
#
# The time string can be either:
# \li Absolute time, in any supported format
# \li Relative time specified as now{-n}, where n is a number of seconds
# \li Relative time specified as now{-[[H:]MM:]SS}
# \li "begin", which is the beginning of known time (1/1/70)
# \li "end", which is the end of trackable time and far beyond the
# useful life of this utility (01/18/2038)
#
    # \throws RuntimeError if the time string format cannot be understood.
# \param timestr The time string.
# \param utc Whether or not the user's time string is in UTC time.
# \return The time, in number of seconds since the Epoch
@staticmethod
def timeFromString(timestr, utc=True):
ret = 0
tm = None
tstr = timestr.strip()
if tstr == "":
ret = 0
elif tstr == "begin":
ret = 0
elif tstr == "end":
ret = sys.maxsize
else:
if tstr.find('now') != -1:
tm = datetime.datetime.utcnow() if utc else datetime.datetime.now()
i = tstr.find('-')
if i != -1:
tmp = tstr[i+1:]
tm = tm - datetime.timedelta(seconds=_radio.timeSecsFromString(tmp))
else:
# Replace strings "today" and "yesterday"
tmToday = datetime.datetime.utcnow() if utc else datetime.datetime.now()
tmYesterday = tmToday - datetime.timedelta(days=1)
dateStrToday = tmToday.strftime("%Y%m%d")
dateStrYesterday = tmYesterday.strftime("%Y%m%d")
tstr = tstr.replace("today", dateStrToday).replace("yesterday", dateStrYesterday)
# Try a series of known formats
# -- Formats are 5-tuples: (format string, width, needs year, needs month, needs day)
supportedFmts = [ \
('%Y-%m-%dT%H:%M:%S%z', 24, False, False, False), \
('%Y-%m-%dT%H:%M:%S', 19, False, False, False), \
('%Y%m%d:%H%M%S', 15, False, False, False), \
('%a %b %d %H:%M:%S %Y', 24, False, False, False), \
('%b %d %H:%M:%S', 15, True, False, False), \
('%b %d, %Y %I:%M:%S %p', 24, False, False, False), \
('%Y-%m-%d %H:%M:%S', 19, False, False, False), \
('%Y/%m/%d %H:%M:%S', 19, False, False, False), \
('%Y%m%d_%H%M%S', 15, False, False, False), \
('%m/%d/%Y %H:%M', 16, False, False, False), \
('%m/%d/%y %H:%M:%S', 17, False, False, False), \
('%Y%m%d', 8, False, False, False), \
('%Y-%m-%d', 10, False, False, False), \
('%H:%M:%S', 8, True, True, True), \
('%H%M%S', 6, True, True, True), \
]
for fmt in supportedFmts:
try:
tmp = tstr[:fmt[1]].strip()
#print "[DBG][timeFromString] Convert"
#print "[DBG][timeFromString] -- string:", tmp
#print "[DBG][timeFromString] -- format:", fmt[0]
tm = datetime.datetime.strptime(tmp, fmt[0])
#print "[DBG][timeFromString] -- SUCCESS"
# Replace date items from today's date as needed by the format
# -- Day
if fmt[4]:
tm = tm.replace(day=tmToday.day)
# -- Month
if fmt[3]:
tm = tm.replace(month=tmToday.month)
# -- Year
if fmt[2]:
tm = tm.replace(year=tmToday.year)
# But if the resulting date is in the future, then we need to dial it
# back a year
if tm > tmToday:
tm = tm.replace(year=tmToday.year-1)
break
except:
#print "[DBG][timeFromString] -- FAILURE"
tm = None
if tm is not None:
ret = time.mktime(tm.timetuple())
else:
raise RuntimeError("Improperly formatted time: \"" + tstr + "\"")
return ret
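    # Illustrative sketch of accepted inputs (comments only, not executed;
    # the timestamps are arbitrary placeholders):
    #
    #     _radio.timeFromString("begin")                # -> 0
    #     _radio.timeFromString("now-3600")             # one hour ago
    #     _radio.timeFromString("2020-01-02 03:04:05")  # absolute timestamp
    #     _radio.timeFromString("yesterday")            # yesterday's date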
##
# Converts a time string ([+-][[H:]M:]S) to a time in seconds.
#
# \note Hours and minutes are not bounded in any way. These strings provide the
# same result:
# \li "7200"
# \li "120:00"
# \li "2:00:00"
#
# \throws RuntimeError if the time is formatted improperly.
# \param timeStr The time string.
# \return The number of seconds.
@staticmethod
def timeSecsFromString(timeStr):
hrs = 0
mins = 0
secs = 0
sgn = 1
try:
if "-" in timeStr:
sgn = -1
            tmp = timeStr.strip().translate(str.maketrans("", "", " +-"))
if tmp != "":
vec = tmp.split(":")
if vec[-1] != "":
secs = int(vec[-1])
else:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
if len(vec) > 1:
if vec[-2] != "":
mins = int(vec[-2])
else:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
if len(vec) > 2:
if vec[-3] != "":
hrs = int(vec[-3])
else:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
except:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
return ( sgn * (hrs * 3600 + mins * 60 + secs) )
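    # Illustrative sketch (comments only, not executed). Hours and minutes
    # are not bounded, so these all evaluate to 7200 seconds:
    #
    #     _radio.timeSecsFromString("7200")
    #     _radio.timeSecsFromString("120:00")
    #     _radio.timeSecsFromString("2:00:00")
    #
    # A leading "-" negates the result, e.g. "-0:30" -> -30.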
##
# \internal
# \brief Radio handler class that supports nothing more complicated than
# identifying a connected radio.
#
# Used internally to support radio auto-detection.
#
# This class implements the CyberRadioDriver.IRadio interface.
#
class _radio_identifier(_radio):
_name = "Radio Identifier"
json = False
ifSpec = _ifSpec
adcRate = 1.0
numTuner = 0
numTunerBoards = 0
tunerType = None
numWbddc = 0
wbddcType = None
numNbddc = 0
nbddcType = None
numTxs = 0
txType = None
numWbduc = 0
wbducType = None
numNbduc = 0
nbducType = None
numWbddcGroups = 0
wbddcGroupType = None
numNbddcGroups = 0
nbddcGroupType = None
numTunerGroups = 0
tunerGroupType = None
numGigE = 0
numGigEDipEntries = 0
idnQry = command.idn
verQry = command.ver
hrevQry = command.hrev
statQry = None
tstatQry = None
tadjCmd = None
resetCmd = None
cfgCmd = None
ppsCmd = None
utcCmd = None
refCmd = None
rbypCmd = None
sipCmd = None
dipCmd = None
smacCmd = None
dmacCmd = None
calfCmd = None
nbssCmd = None
fnrCmd = None
gpsCmd = None
gposCmd = None
rtvCmd = None
tempCmd = None
gpioStaticCmd = None
gpioSeqCmd = None
tgfcCmd = None
refModes = {}
rbypModes = {}
vitaEnableOptions = {}
connectionModes = ["https", "tcp", "udp", "tty"]
validConfigurationKeywords = []
setTimeDefault = False
# OVERRIDE
##
# \protected
# \brief Queries hardware to determine the object's current configuration.
def _queryConfiguration(self):
# Call the base-class implementation
configKeys.Configurable._queryConfiguration(self)
# This radio has nothing further that it can configure
##
# \brief Radio function (mode) command used by JSON-based radios.
#
class funJSON(command._jsonCommandBase):
mnemonic = "fun"
queryParamMap = {
configKeys.RADIO_FUNCTION: "state",
}
setParamMap = {
configKeys.RADIO_FUNCTION: "state",
}
##
# \internal
# \brief Radio handler class that supports nothing more complicated than
# identifying a connected radio.
#
# Used internally to support radio auto-detection.
#
# This class implements the CyberRadioDriver.IRadio interface.
#
class _radio_identifier_json(_radio):
_name = "Radio Identifier"
json = True
ifSpec = _ifSpec
adcRate = 1.0
numTuner = 0
numTunerBoards = 0
tunerType = None
numWbddc = 0
wbddcType = None
numNbddc = 0
nbddcType = None
numTxs = 0
txType = None
numWbduc = 0
wbducType = None
numNbduc = 0
nbducType = None
numWbddcGroups = 0
wbddcGroupType = None
numNbddcGroups = 0
nbddcGroupType = None
numTunerGroups = 0
tunerGroupType = None
numGigE = 0
numGigEDipEntries = 0
idnQry = None
verQry = None
hrevQry = None
statQry = command.status_json
tstatQry = None
tadjCmd = None
resetCmd = None
cfgCmd = None
ppsCmd = None
utcCmd = None
refCmd = None
rbypCmd = None
sipCmd = None
dipCmd = None
smacCmd = None
dmacCmd = None
calfCmd = None
nbssCmd = None
fnrCmd = None
gpsCmd = None
gposCmd = None
rtvCmd = None
tempCmd = None
gpioStaticCmd = None
gpioSeqCmd = None
tgfcCmd = None
funCmd = funJSON
refModes = {}
rbypModes = {}
vitaEnableOptions = {}
connectionModes = ["https", "tcp", "udp", "tty"]
validConfigurationKeywords = [
configKeys.RADIO_FUNCTION
]
setTimeDefault = False
# OVERRIDE
##
# \brief Returns version information for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVersionInfo()
def getVersionInfo(self):
# Query hardware for details if we don't have them already
keys = [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN,
configKeys.VERINFO_SW, configKeys.VERINFO_FW,
configKeys.VERINFO_REF, configKeys.VERINFO_UNITREV,
configKeys.VERINFO_HW]
if not all([key in self.versionInfo for key in keys]):
cmd = self.statQry(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
self._dictUpdate(self.versionInfo, rspInfo, {}, keys)
for key in keys:
if key not in self.versionInfo:
self.versionInfo[key] = "N/A"
return self.versionInfo
# OVERRIDE
##
# \protected
# \brief Queries hardware to determine the object's current configuration.
def _queryConfiguration(self):
# Call the base-class implementation
configKeys.Configurable._queryConfiguration(self)
# Call the radio function command
if self.funCmd is not None:
cmd = self.funCmd(parent=self,
query=True,
verbose=self.verbose, logFile=self.logFile)
cmd.send( self.sendCommand, )
self._addLastCommandErrorInfo(cmd)
rspInfo = cmd.getResponseInfo()
if rspInfo is not None:
for key in self.validConfigurationKeywords:
val = rspInfo.get(key, None)
if val is not None:
self.configuration[key] = val
# This radio has nothing further that it can configure
#-- End Radio Handler Objects --------------------------------------------------#
#-- NOTE: Radio handler objects for supporting specific radios need to be
# implemented under the CyberRadioDriver.radios package tree.
| 43.884634 | 168 | 0.591156 | 175,738 | 0.976718 | 0 | 0 | 20,995 | 0.116686 | 0 | 0 | 53,714 | 0.298532 |
a0fde969f3e2acaa6481f6fe003e765cdca46b4c | 1,686 | py | Python | alpha_zero/NeuralNet.py | blekinge/alpha-zero-general | 7cc33e9b2e40602549b59fe753956e69a56f51f1 | [
"MIT"
]
| null | null | null | alpha_zero/NeuralNet.py | blekinge/alpha-zero-general | 7cc33e9b2e40602549b59fe753956e69a56f51f1 | [
"MIT"
]
| null | null | null | alpha_zero/NeuralNet.py | blekinge/alpha-zero-general | 7cc33e9b2e40602549b59fe753956e69a56f51f1 | [
"MIT"
]
| null | null | null | from typing import List, Tuple
import numpy as np
from alpha_zero.Board import Board
class NeuralNet():
"""
This class specifies the base NeuralNet class. To define your own neural
network, subclass this class and implement the functions below. The neural
network does not consider the current player, and instead only deals with
the canonical form of the board.
See othello/NNet.py for an example implementation.
"""
def __init__(self, game):
pass
def train(self, examples: List[Tuple[Board,List[float],List[int]]]):
"""
This function trains the neural network with examples obtained from
self-play.
:param examples: a list of training examples, where each example is of form
(board, pi, v). pi is the MCTS informed policy vector for
the given board, and v is its value. The examples has
board in its canonical form.
"""
pass
def predict(self, board: Board) -> Tuple[np.array,float]:
"""
:param board: current board in its canonical form.
:returns pi: a policy vector for the current board- a numpy array of length game.getActionSize
:returns v: a float in [-1,1] that gives the value of the current board
"""
pass
def save_checkpoint(self, folder: str, filename:str):
"""
Saves the current neural network (with its parameters) in
folder/filename
"""
pass
def load_checkpoint(self, folder: str, filename:str):
"""
Loads parameters of the neural network from folder/filename
"""
pass
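# Illustrative sketch (an addition, not part of this repository): a minimal
# subclass that satisfies the interface with a uniform policy and a neutral
# value estimate. The class name and the game.getActionSize() call are
# assumptions taken from the docstrings above; adapt them to your own game.
class UniformNet(NeuralNet):
    def __init__(self, game):
        super().__init__(game)
        self.action_size = game.getActionSize()
    def train(self, examples):
        # No learning; placeholder that keeps the interface complete.
        pass
    def predict(self, board):
        # Uniform policy over all actions and a neutral value for every board.
        pi = np.full(self.action_size, 1.0 / self.action_size)
        return pi, 0.0
    def save_checkpoint(self, folder, filename):
        pass
    def load_checkpoint(self, folder, filename):
        pass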
| 31.222222 | 102 | 0.6293 | 1,596 | 0.946619 | 0 | 0 | 0 | 0 | 0 | 0 | 1,186 | 0.70344 |
a0fef1eaf1459e3aa6754a55ca8204b402a0ab05 | 785 | py | Python | server/app/forms.py | zhancongc/bugaboo | ac78e7e0274492273554b089122196b7869e8bfb | [
"Apache-2.0"
]
| null | null | null | server/app/forms.py | zhancongc/bugaboo | ac78e7e0274492273554b089122196b7869e8bfb | [
"Apache-2.0"
]
| null | null | null | server/app/forms.py | zhancongc/bugaboo | ac78e7e0274492273554b089122196b7869e8bfb | [
"Apache-2.0"
]
| null | null | null | """
Project : bugaboo
Filename : forms.py
Author : zhancongc
Description : Form management
"""
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, TextAreaField, SelectField, FileField, IntegerField, PasswordField, SubmitField
from wtforms.validators import DataRequired
class GodLoginForm(FlaskForm):
"""
    Login form
"""
username = StringField('username', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
submit = SubmitField()
class ExchangeAwardForm(FlaskForm):
"""
    Award exchange form
"""
awardrecord_token = StringField('awardrecord_token', validators=[DataRequired()])
exchange_token = PasswordField('exchange_token', validators=[DataRequired()])
submit = SubmitField()
| 23.787879 | 126 | 0.718471 | 491 | 0.606922 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.264524 |
9d006b0d7e89fe26f4e43d422a80339277272355 | 3,836 | py | Python | synthdid/variance.py | MasaAsami/pysynthdid | 01afe33ae22f513c65f9cfdec56a4b21ca547c28 | [
"Apache-2.0"
]
| null | null | null | synthdid/variance.py | MasaAsami/pysynthdid | 01afe33ae22f513c65f9cfdec56a4b21ca547c28 | [
"Apache-2.0"
]
| null | null | null | synthdid/variance.py | MasaAsami/pysynthdid | 01afe33ae22f513c65f9cfdec56a4b21ca547c28 | [
"Apache-2.0"
]
| 2 | 2022-03-11T03:13:36.000Z | 2022-03-20T22:55:13.000Z | import pandas as pd
import numpy as np
from tqdm import tqdm
class Variance(object):
def estimate_variance(self, algo="placebo", replications=200):
"""
# algo
- placebo
## The following algorithms are omitted because they are not practical.
- bootstrap
- jackknife
"""
if algo == "placebo":
Y_pre_c = self.Y_pre_c.copy()
Y_post_c = self.Y_post_c.copy()
assert self.n_treat < Y_pre_c.shape[1]
control_names = Y_pre_c.columns
result_tau_sdid = []
result_tau_sc = []
result_tau_did = []
for i in tqdm(range(replications)):
# setup
np.random.seed(seed=self.random_seed + i)
placebo_t = np.random.choice(control_names, self.n_treat, replace=False)
placebo_c = [col for col in control_names if col not in placebo_t]
pla_Y_pre_t = Y_pre_c[placebo_t]
pla_Y_post_t = Y_post_c[placebo_t]
pla_Y_pre_c = Y_pre_c[placebo_c]
pla_Y_post_c = Y_post_c[placebo_c]
pla_result = pd.DataFrame(
{
"pla_actual_y": pd.concat([pla_Y_pre_t, pla_Y_post_t]).mean(
axis=1
)
}
)
post_placebo_treat = pla_result.loc[
self.post_term[0] :, "pla_actual_y"
].mean()
# estimation
## sdid
pla_zeta = self.est_zeta(pla_Y_pre_c)
pla_hat_omega = self.est_omega(pla_Y_pre_c, pla_Y_pre_t, pla_zeta)
pla_hat_lambda = self.est_lambda(pla_Y_pre_c, pla_Y_post_c)
## sc
pla_hat_omega_ADH = self.est_omega_ADH(pla_Y_pre_c, pla_Y_pre_t)
# prediction
## sdid
pla_hat_omega = pla_hat_omega[:-1]
pla_Y_c = pd.concat([pla_Y_pre_c, pla_Y_post_c])
n_features = pla_Y_pre_c.shape[1]
start_w = np.repeat(1 / n_features, n_features)
_intercept = (start_w - pla_hat_omega) @ pla_Y_pre_c.T @ pla_hat_lambda
pla_result["sdid"] = pla_Y_c.dot(pla_hat_omega) + _intercept
## sc
pla_result["sc"] = pla_Y_c.dot(pla_hat_omega_ADH)
# cal tau
## sdid
pre_sdid = pla_result["sdid"].head(len(pla_hat_lambda)) @ pla_hat_lambda
post_sdid = pla_result.loc[self.post_term[0] :, "sdid"].mean()
pre_treat = (pla_Y_pre_t.T @ pla_hat_lambda).values[0]
sdid_counterfuctual_post_treat = pre_treat + (post_sdid - pre_sdid)
result_tau_sdid.append(
post_placebo_treat - sdid_counterfuctual_post_treat
)
## sc
sc_counterfuctual_post_treat = pla_result.loc[
self.post_term[0] :, "sc"
].mean()
result_tau_sc.append(post_placebo_treat - sc_counterfuctual_post_treat)
# did
did_post_actural_treat = (
post_placebo_treat
- pla_result.loc[: self.pre_term[1], "pla_actual_y"].mean()
)
did_counterfuctual_post_treat = (
pla_Y_post_c.mean(axis=1).mean() - pla_Y_pre_c.mean(axis=1).mean()
)
result_tau_did.append(
did_post_actural_treat - did_counterfuctual_post_treat
)
return (
np.var(result_tau_sdid),
np.var(result_tau_sc),
np.var(result_tau_did),
)
| 36.884615 | 88 | 0.516945 | 3,772 | 0.983316 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.087331 |
9d01bb83bee5f2c4612c59332de6ea7b9e34ac2f | 681 | py | Python | todo/views.py | arascch/Todo_list | a4c88abaa4e6c1e158135b4fce4bcfbf64cb86e2 | [
"Apache-2.0"
]
| 1 | 2020-03-24T09:26:23.000Z | 2020-03-24T09:26:23.000Z | todo/views.py | arascch/Todo_list | a4c88abaa4e6c1e158135b4fce4bcfbf64cb86e2 | [
"Apache-2.0"
]
| null | null | null | todo/views.py | arascch/Todo_list | a4c88abaa4e6c1e158135b4fce4bcfbf64cb86e2 | [
"Apache-2.0"
]
| null | null | null | from django.shortcuts import render
from django.utils import timezone
from todo.models import Todo
from django.http import HttpResponseRedirect
def home(request):
todo_items = Todo.objects.all().order_by("-added_date")
return render(request , 'todo/index.html' , {"todo_items":todo_items})
def add_todo(request):
Current_date = timezone.now()
content = request.POST["content"]
created_obj = Todo.objects.create(added_date = Current_date , text = content )
length_of_todos = Todo.objects.all().count()
return HttpResponseRedirect('/')
def delete_todo(request , todo_id):
Todo.objects.get(id = todo_id).delete()
return HttpResponseRedirect('/')
| 35.842105 | 82 | 0.737151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.0837 |
9d02e73cfc6d5e0a0462f594bbcafd9199cb2c88 | 816 | py | Python | Easy/Hangman/HangMan - Stage 6.py | michael-act/HyperSkill | ce16eb3b6f755f7f8f21a57ef2679fcb8a4bd55c | [
"MIT"
]
| 1 | 2020-11-17T18:09:30.000Z | 2020-11-17T18:09:30.000Z | Easy/Hangman/HangMan - Stage 6.py | michael-act/HyperSkill | ce16eb3b6f755f7f8f21a57ef2679fcb8a4bd55c | [
"MIT"
]
| null | null | null | Easy/Hangman/HangMan - Stage 6.py | michael-act/HyperSkill | ce16eb3b6f755f7f8f21a57ef2679fcb8a4bd55c | [
"MIT"
]
| null | null | null | import random
category = ['python', 'java', 'kotlin', 'javascript']
computer = random.choice(category)
hidden = list(len(computer) * "-")
print("H A N G M A N")
counter = 8
while counter > 0:
print()
print("".join(hidden))
letter = input("Input a letter: ")
    if letter in hidden:
counter -= 1
print("No improvements")
elif letter in set(computer):
where = 0
for i in range(computer.count(letter)):
            where = computer.index(letter, where)
            hidden[where] = letter
            where += 1
if "-" not in hidden:
print()
print("".join(hidden))
print("You guessed the word!")
print("You survived!")
break
else:
counter -= 1
print("No such letter in the word")
print(counter)
else:
print("You are hanged!") | 24 | 61 | 0.616422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.216912 |
9d03157b2910202ba3c53d84197f7000003a404d | 6,536 | py | Python | sklcc/taskEdit.py | pyxuweitao/MSZ_YCL | 23323c4660f44af0a45d6ab81cd496b81976f5a0 | [
"Apache-2.0"
]
| null | null | null | sklcc/taskEdit.py | pyxuweitao/MSZ_YCL | 23323c4660f44af0a45d6ab81cd496b81976f5a0 | [
"Apache-2.0"
]
| null | null | null | sklcc/taskEdit.py | pyxuweitao/MSZ_YCL | 23323c4660f44af0a45d6ab81cd496b81976f5a0 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Helper functions for all task-related features
"""
__author__ = "XuWeitao"
import CommonUtilities
import rawSql
def getTasksList(UserID):
"""
	Get the task list, including the serial number, creation time, last modified
	time, product number, color number, material arrival time and creator.
	:param UserID: creator ID; if 'ALL', return the task list for every user
	:return:{
	"SerialNo": task serial number, "CreateTime": task creation time, "LastModifiedTime": last modified time,
	"ProductNo": product number, "ColorNo": color number, "ArriveTime": arrival time, "Name": creator name,
	"GongYingShang": {"id": supplier code, "name": supplier name},
	"WuLiao": {"id": material name ID, "name": material name, "cata": material category name},
	"DaoLiaoZongShu": total arrived quantity, "DanWei": {"id": quantity unit ID, "name": quantity unit}
	"DaoLiaoZongShu2": second arrived quantity, "DanWei2": {"id": unit ID, "name": unit name},
	"XieZuoRen": collaborators on this task, not including the task creator
}
"""
raw = rawSql.Raw_sql()
raw.sql = """SELECT SerialNo, CONVERT(VARCHAR(16), CreateTime, 20) CreateTime, CONVERT(VARCHAR(16), LastModifiedTime, 20) LastModifiedTime,
ProductNo, ColorNo, CONVERT(VARCHAR(10), ArriveTime, 20) ArriveTime, dbo.getUserNameByUserID(UserID), SupplierID,
dbo.getSupplierNameByID(SupplierID), MaterialID, dbo.getMaterialNameByID(MaterialID),
dbo.getMaterialTypeNameByID(dbo.getMaterialTypeIDByMaterialID(MaterialID)), DaoLiaoZongShu, UnitID,
dbo.getUnitNameByID(UnitID), DaoLiaoZongShu2, UnitID2, dbo.getUnitNameByID(UnitID2) AS DanWei2, Inspectors, UserID
FROM RMI_TASK WITH(NOLOCK)"""
	# A collaborator on a task can also see it
if UserID != 'ALL':
raw.sql += " WHERE CHARINDEX('%s', Inspectors) > 0 AND State = 2" % UserID
else:
raw.sql += " WHERE State = 0"
res = raw.query_all()
jsonReturn = list()
for row in res:
		# Collaborators are separated by the '@' character, and the list includes the task creator
Inspectors = row[18].split('@')
InspectorList = list()
for inspectorNo in Inspectors:
if inspectorNo == row[19]:
continue
raw.sql = "SELECT DBO.getUserNameByUserID('%s')"%inspectorNo
inspectorName = raw.query_one()
if inspectorName:
inspectorName = inspectorName[0]
InspectorList.append({'Name':inspectorName, 'ID':inspectorNo})
jsonReturn.append({
"SerialNo":row[0], "CreateTime":row[1], "LastModifiedTime":row[2],
"ProductNo":row[3], "ColorNo":row[4], "ArriveTime":row[5], "Name":row[6],
"GongYingShang":{"id":row[7], "name":row[8]},
"WuLiao":{"id":row[9], "name":row[10], "cata":row[11]},
"DaoLiaoZongShu":row[12], "DanWei":{"id":row[13], "name":row[14]},
"DaoLiaoZongShu2":row[15], "DanWei2":{"id":row[16], "name":row[17]},
"XieZuoRen":InspectorList
})
return jsonReturn
def editTaskInfo(taskInfo, userID):
"""
	Create a new task record, or rewrite an existing one, depending on the isNew field and the submitted data.
	:param taskInfo: task-related information
	:param userID: user ID
	:return: a flag indicating whether the edit succeeded
"""
raw = rawSql.Raw_sql()
	# Check whether this is a "return the task" request
if "isReturn" in taskInfo:
raw.sql = "UPDATE RMI_TASK WITH(ROWLOCK) SET State = 2 WHERE SerialNo = '%s'"%taskInfo['SerialNo']
raw.update()
else:
isNew = True if taskInfo['isNew'] == "True" else False
		# Default missing fields to None; even if the front end sends null, it is still None after JSON decoding
taskInfo['DaoLiaoZongShu2'] = False if 'DaoLiaoZongShu2' not in taskInfo else taskInfo['DaoLiaoZongShu2']
taskInfo['DanWei2'] = {'id':None} if 'DanWei2' not in taskInfo else taskInfo['DanWei2']
		# The collaborator list sent by the front end does not include the current user's ID
if 'XieZuoRen' in taskInfo:
taskInfo['XieZuoRen'].append({'ID':userID})
taskInfo['Inspectors'] = "@".join([User['ID'] for User in taskInfo['XieZuoRen']])
else:
taskInfo['Inspectors'] = userID
if isNew:
raw.sql = """INSERT INTO RMI_TASK WITH(ROWLOCK) (CreateTime, LastModifiedTime, ProductNo, ColorNo,
ArriveTime, UserID, FlowID, MaterialID, SupplierID, UnitID, DaoLiaoZongShu, DaoLiaoZongShu2, UnitID2, Inspectors)
VALUES ( getdate(), getdate(),'%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, '%s' );""" % (
taskInfo['ProductNo'], taskInfo['ColorNo'], taskInfo['ArriveTime'][:10], userID,
taskInfo['FlowID'], taskInfo['WuLiao']['id'], taskInfo['GongYingShang']['id'],
taskInfo['DanWei']['id'], taskInfo['DaoLiaoZongShu'],
"'"+unicode(taskInfo['DaoLiaoZongShu2'])+"'" if taskInfo['DaoLiaoZongShu2'] else "NULL",
"'"+unicode(taskInfo['DanWei2']['id'])+"'" if taskInfo['DanWei2']['id'] else "NULL", taskInfo['Inspectors'] )
raw.update()
			# Return the serial number for the quick "new task" action at the top right of the accessories page
raw.sql = "SELECT TOP 1 SerialNo FROM RMI_TASK WHERE UserID = '%s' AND State = 2 ORDER BY CreateTime desc"%userID
return raw.query_one()[0]
else:
raw.sql = """UPDATE RMI_TASK WITH(ROWLOCK) SET MaterialID = '%s',SupplierID = '%s', UnitID = '%s',
DaoLiaoZongShu = '%s', ProductNo = '%s', ColorNo = '%s', ArriveTime = '%s', DaoLiaoZongShu2 = %s,
UnitID2 = %s, Inspectors = '%s'
WHERE SerialNo = '%s'""" % (
taskInfo['WuLiao']['id'], taskInfo['GongYingShang']['id'], taskInfo['DanWei']['id'],
taskInfo['DaoLiaoZongShu'], taskInfo['ProductNo'], taskInfo['ColorNo'],
taskInfo['ArriveTime'][:10].replace('-',''),
"'"+unicode(taskInfo['DaoLiaoZongShu2'])+"'" if taskInfo['DaoLiaoZongShu2'] else "NULL",
"'"+unicode(taskInfo['DanWei2']['id'])+"'" if taskInfo['DanWei2']['id'] else "NULL", taskInfo['Inspectors'],
taskInfo['SerialNo'])
raw.update()
def getFlowList():
"""
	Fetch the list of all workflows from the database.
	:return: {"name": FlowName, "value": FlowID}
"""
raw = rawSql.Raw_sql()
raw.sql = "SELECT FlowID AS value, FlowName AS name FROM RMI_WORK_FLOW WITH(NOLOCK)"
res, columns = raw.query_all(needColumnName=True)
return CommonUtilities.translateQueryResIntoDict(columns, res)
def commitTaskBySerialNo(SerialNo):
"""
	Approve (pass) a task, looked up by its serial number.
	:param SerialNo: task serial number
:return:
"""
raw = rawSql.Raw_sql()
raw.sql = "UPDATE RMI_TASK SET State = 0 WHERE SerialNo = '%s'"%SerialNo
raw.update()
return
def deleteTaskBySerialNo(SerialNo):
"""
	Delete a task. Only the row in RMI_TASK is deleted here; a trigger removes the related data in the other tables.
	:param SerialNo: task serial number
:return:
"""
	# TODO: the trigger update_other_tables_when_delete_rmi_task deletes the related data in tables other than F01
raw = rawSql.Raw_sql()
raw.sql = "DELETE FROM RMI_TASK WHERE SerialNo='%s'"%SerialNo
raw.update()
#call trigger delete all task info in rmi_task_process...
return
def getAllMaterialByName(fuzzyName):
"""
	Get all material names matching a fuzzy input.
	:param fuzzyName: fuzzy search keyword
	:return: {'id': material name ID, 'name': material name, 'cata': material category name}
"""
raw = rawSql.Raw_sql()
raw.sql = """SELECT MaterialID AS id, MaterialName AS name, dbo.getMaterialTypeNameByID(MaterialTypeID) AS cata
FROM RMI_MATERIAL_NAME WITH(NOLOCK)"""
if fuzzyName:
raw.sql += """ WHERE MaterialName LIKE '%%%%%s%%%%'"""%fuzzyName
res, cols = raw.query_all(needColumnName=True)
return CommonUtilities.translateQueryResIntoDict(cols, res)
	else:  # If the input is empty, return placeholder data; otherwise the front end becomes sluggish
return [{"name":u'请输入关键字', "id":"", "cata":""}]
| 41.106918 | 140 | 0.671665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,903 | 0.657768 |
9d064db24d2e119266bc78323c4a529982872160 | 744 | py | Python | Leetcoding-Actions/my-weekly-DSA-challenge/2020-w44-p0200-Number-of-Islands.py | shoaibur/SWE | 1e114a2750f2df5d6c50b48c8e439224894d65da | [
"MIT"
]
| 1 | 2020-11-14T18:28:13.000Z | 2020-11-14T18:28:13.000Z | Leetcoding-Actions/my-weekly-DSA-challenge/2020-w44-p0200-Number-of-Islands.py | shoaibur/SWE | 1e114a2750f2df5d6c50b48c8e439224894d65da | [
"MIT"
]
| null | null | null | Leetcoding-Actions/my-weekly-DSA-challenge/2020-w44-p0200-Number-of-Islands.py | shoaibur/SWE | 1e114a2750f2df5d6c50b48c8e439224894d65da | [
"MIT"
]
| null | null | null | class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
'''
        T: O(mn); S: O(mn) in the worst case (DFS recursion stack); the grid is modified in place
'''
if not grid: return 0
nrow, ncol = len(grid), len(grid[0])
def exploreIsland(grid, i, j):
if i < 0 or i > nrow - 1 or j < 0 or j > ncol-1 or grid[i][j] == "0":
return
grid[i][j] = "0"
for (ni, nj) in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]:
exploreIsland(grid, ni, nj)
count_island = 0
for i in range(nrow):
for j in range(ncol):
if grid[i][j] == "1":
exploreIsland(grid, i, j)
count_island += 1
return count_island
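        # Illustrative example (an addition, not part of the original solution):
        # for grid = [["1","1","0"],["0","1","0"],["0","0","1"]] the method
        # returns 2, since the three connected "1"s form one island and the
        # lone "1" another. Note the grid is mutated in place: visited cells are
        # flipped to "0" instead of being tracked in a separate visited set.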
| 32.347826 | 81 | 0.415323 | 743 | 0.998656 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.071237 |
9d07e918f729733a967e2d67e465e2cf7ce7d2a4 | 11,417 | py | Python | tensor2tensor/models/revnet.py | ysglh/tensor2tensor | f55462a9928f3f8af0b1275a4fb40d13cae6cc79 | [
"Apache-2.0"
]
| null | null | null | tensor2tensor/models/revnet.py | ysglh/tensor2tensor | f55462a9928f3f8af0b1275a4fb40d13cae6cc79 | [
"Apache-2.0"
]
| null | null | null | tensor2tensor/models/revnet.py | ysglh/tensor2tensor | f55462a9928f3f8af0b1275a4fb40d13cae6cc79 | [
"Apache-2.0"
]
| null | null | null | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a RevNet with the bottleneck residual function.
Implements the following equations described in the RevNet paper:
y1 = x1 + f(x2)
y2 = x2 + g(y1)
However, in practice, the authors use the following equations to downsample
tensors inside a RevNet block:
y1 = h(x1) + f(x2)
y2 = h(x2) + g(y1)
In this case, h is the downsampling function used to change number of channels.
These modified equations are evident in the authors' code online:
https://github.com/renmengye/revnet-public
For reference, the original paper can be found here:
https://arxiv.org/pdf/1707.04585.pdf
"""
# Dependency imports
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import rev_block
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
CONFIG = {'2d': {'conv': tf.layers.conv2d,
'max_pool': tf.layers.max_pooling2d,
'avg_pool': tf.layers.average_pooling2d,
'split_axis': 3,
'reduction_dimensions': [1, 2]
},
'3d': {'conv': tf.layers.conv3d,
'max_pool': tf.layers.max_pooling3d,
'avg_pool': tf.layers.average_pooling2d,
'split_axis': 4,
'reduction_dimensions': [1, 2, 3]
}
}
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, layer_stride=1,
training=True, padding='SAME'):
"""Applies bottleneck residual function for 104-layer RevNet.
Args:
x: input tensor
depth1: Number of output channels for the first and second conv layers.
depth2: Number of output channels for the third conv layer.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
layer_stride: Stride for the first conv filter. Note that this particular
104-layer RevNet architecture only varies the stride for the first conv
filter. The stride for the second conv filter is always set to 1.
training: True for train phase, False for eval phase.
padding: Padding for each conv layer.
Returns:
Output tensor after applying residual function for 104-layer RevNet.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope('f'):
if first_batch_norm:
net = tf.layers.batch_normalization(x, training=training)
net = tf.nn.relu(net)
else:
net = x
net = conv(net, depth1, 1, strides=layer_stride,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth1, 3, strides=1,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth2, 1, strides=1,
padding=padding, activation=None)
return net
def h(x, output_channels, dim='2d', layer_stride=1, scope='h'):
"""Downsamples 'x' using a 1x1 convolution filter and a chosen stride.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
layer_stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope for the h function.
This function uses a 1x1 convolution filter and a chosen stride to downsample
the input tensor x.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if layer_stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
layer_stride is 1.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope(scope):
x = conv(x, output_channels, 1, strides=layer_stride, padding='SAME',
activation=None)
return x
def init(images, num_channels, dim='2d', training=True, scope='init'):
"""Standard ResNet initial block used as first RevNet block.
Args:
images: [N, H, W, 3] tensor of input images to the model.
num_channels: Output depth of convolutional layer in initial block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
training: True for train phase, False for eval phase.
scope: Optional scope for the init block.
Returns:
Two [N, H, W, C] output activations from input images.
"""
conv = CONFIG[dim]['conv']
pool = CONFIG[dim]['max_pool']
with tf.variable_scope(scope):
net = conv(images, num_channels, 7, strides=2,
padding='SAME', activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = pool(net, pool_size=3, strides=2)
x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis'])
return x1, x2
def unit(x1, x2, block_num, depth1, depth2, num_layers, dim='2d',
first_batch_norm=True, stride=1, training=True):
"""Implements bottleneck RevNet unit from authors' RevNet-104 architecture.
Args:
x1: [N, H, W, C] tensor of network activations.
x2: [N, H, W, C] tensor of network activations.
block_num: integer ID of block
depth1: First depth in bottleneck residual unit.
depth2: Second depth in bottleneck residual unit.
num_layers: Number of layers in the RevNet block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the residual function.
training: True for train phase, False for eval phase.
Returns:
Two [N, H, W, C] output activation tensors.
"""
scope_name = 'unit_%d' % block_num
with tf.variable_scope(scope_name):
# Manual implementation of downsampling
with tf.variable_scope('downsampling'):
with tf.variable_scope('x1'):
hx1 = h(x1, depth2, dim=dim, layer_stride=stride)
fx2 = f(x2, depth1, depth2, dim=dim, layer_stride=stride,
first_batch_norm=first_batch_norm, training=training)
x1 = hx1 + fx2
with tf.variable_scope('x2'):
hx2 = h(x2, depth2, dim=dim, layer_stride=stride)
fx1 = f(x1, depth1, depth2, dim=dim, training=training)
x2 = hx2 + fx1
# Full block using memory-efficient rev_block implementation.
with tf.variable_scope('full_block'):
residual_func = lambda x: f(x, depth1, depth2, dim=dim, training=training)
x1, x2 = rev_block.rev_block(x1, x2,
residual_func,
residual_func,
num_layers=num_layers)
return x1, x2
def final_block(x1, x2, dim='2d', training=True, scope='final_block'):
"""Converts activations from last RevNet block to pre-logits.
Args:
x1: [NxHxWxC] tensor of network activations.
x2: [NxHxWxC] tensor of network activations.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
training: True for train phase, False for eval phase.
scope: Optional variable scope for the final block.
Returns:
[N, hidden_dim] pre-logits tensor from activations x1 and x2.
"""
# Final batch norm and relu
with tf.variable_scope(scope):
y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])
y = tf.layers.batch_normalization(y, training=training)
y = tf.nn.relu(y)
# Global average pooling
net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],
name='final_pool', keep_dims=True)
return net
def revnet104(inputs, hparams, reuse=None):
"""Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
"""
training = hparams.mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('RevNet104', reuse=reuse):
x1, x2 = init(inputs,
num_channels=hparams.num_channels_init_block,
dim=hparams.dim,
training=training)
for block_num in range(1, len(hparams.num_layers_per_block)):
block = {'depth1': hparams.num_channels_first[block_num],
'depth2': hparams.num_channels_second[block_num],
'num_layers': hparams.num_layers_per_block[block_num],
'first_batch_norm': hparams.first_batch_norm[block_num],
'stride': hparams.strides[block_num]}
x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training,
**block)
pre_logits = final_block(x1, x2, dim=hparams.dim, training=training)
return pre_logits
@registry.register_model
class Revnet104(t2t_model.T2TModel):
def body(self, features):
return revnet104(features['inputs'], self.hparams)
@registry.register_hparams
def revnet_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.add_hparam('num_channels_first', [64, 128, 256, 416])
hparams.add_hparam('num_channels_second', [256, 512, 1024, 1664])
hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1])
hparams.add_hparam('first_batch_norm', [False, True, True, True])
hparams.add_hparam('strides', [1, 2, 2, 2])
hparams.add_hparam('num_channels_init_block', 32)
hparams.add_hparam('dim', '2d')
hparams.optimizer = 'Momentum'
hparams.learning_rate = 0.01
hparams.weight_decay = 1e-4
# Can run with a batch size of 128 with Problem ImageImagenet224
hparams.tpu_batch_size_per_shard = 128
return hparams
| 38.441077 | 80 | 0.681177 | 120 | 0.010511 | 0 | 0 | 877 | 0.076815 | 0 | 0 | 6,530 | 0.571954 |
9d08e38fa29119640133acdff959362b1c00409d | 4,166 | py | Python | tests/unit/test_services.py | BlooAM/Online-shopping-app | aa68d258fe32bf5a792e534dddd9def7c25460e2 | [
"MIT"
]
| null | null | null | tests/unit/test_services.py | BlooAM/Online-shopping-app | aa68d258fe32bf5a792e534dddd9def7c25460e2 | [
"MIT"
]
| null | null | null | tests/unit/test_services.py | BlooAM/Online-shopping-app | aa68d258fe32bf5a792e534dddd9def7c25460e2 | [
"MIT"
]
| null | null | null | import pytest
from datetime import date, timedelta
from adapters import repository
from domain.model import Batch, OrderLine, allocate, OutOfStock
from domain import model
from service_layer import handlers, unit_of_work
class FakeSession:
def __init__(self):
self.committed = False
def commit(self):
        self.committed = True
class FakeRepository(repository.AbstractRepository):
def __init__(self, products):
super().__init__()
self._products = set(products)
def _add(self, product):
self._products.add(product)
def _get(self, sku):
return next((p for p in self._products if p.sku == sku), None)
class FakeUnitOfWork(unit_of_work.AbstractUnitOfWork):
def __init__(self):
self.batches = FakeRepository([])
self.committed = False
def _commit(self):
self.committed = True
def rollback(self):
pass
today = date.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
def test_add_batch():
uow = FakeUnitOfWork()
handlers.add_batch("b1", "CRUNCHY-ARMCHAIR", 100, None, uow)
assert uow.batches.get("b1") is not None
assert uow.committed
def test_prefers_current_stock_batches_to_shipments():
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
line = OrderLine("oref", "RETRO-CLOCK", 10)
allocate(line, [in_stock_batch, shipment_batch])
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_warehouse_batches_to_shipments():
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
repo = FakeRepository([in_stock_batch, shipment_batch])
session = FakeSession()
line = OrderLine('oref', "RETRO-CLOCK", 10)
handlers.allocate(line, repo, session)
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_earlier_batches():
earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, eta=today)
medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow)
latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later)
line = OrderLine("order1", "MINIMALIST-SPOON", 10)
allocate(line, [medium, earliest, latest])
assert earliest.available_quantity == 90
assert medium.available_quantity == 100
assert latest.available_quantity == 100
def test_returns_allocated_batch_ref():
in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None)
shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow)
line = OrderLine("oref", "HIGHBROW-POSTER", 10)
allocation = allocate(line, [in_stock_batch, shipment_batch])
assert allocation == in_stock_batch.reference
def test_raises_out_of_stock_exception_if_cannot_allocate():
batch = Batch('batch1', 'SMALL-FORK', 10, eta=today)
allocate(OrderLine('order1', 'SMALL-FORK', 10), [batch])
with pytest.raises(OutOfStock, match='SMALL-FORK'):
allocate(OrderLine('order2', 'SMALL-FORK', 1), [batch])
def test_commits():
line = model.OrderLine("o1", "OMINOUS-MIRROR", 10)
batch = model.Batch("b1", "OMINOUS-MIRROR", 100, eta=None)
repo = FakeRepository([batch])
session = FakeSession()
handlers.allocate("o1", "OMINOUS-MIRROR", 10, repo, session)
assert session.committed is True
def test_allocate_returns_allocation():
uow = FakeUnitOfWork()
handlers.add_batch("batch1", "COMPLICATED-LAMP", 100, None, uow)
result = handlers.allocate("o1", "COMPLICATED-LAMP", 10, uow)
assert result == "bach1"
def test_error_for_invalid_sku():
line = model.OrderLine("o1", "NONEXISTENTSKU", 10)
batch = model.Batch("b1", "AREALSKU", 100, eta=None)
repo = FakeRepository([batch])
with pytest.raises(handlers.InvalidSku, match="Invalid name of SKU: NONEXISTENTSKU"):
handlers.allocate("o1", "NONEXISTENTSKU", 10, repo, FakeSession())
| 32.046154 | 89 | 0.702112 | 683 | 0.163946 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.162026 |
9d08ebe64750ed4ee86af0207bca624b0391ff75 | 1,786 | py | Python | DQMOffline/L1Trigger/python/L1TEGammaOffline_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
]
| null | null | null | DQMOffline/L1Trigger/python/L1TEGammaOffline_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
]
| null | null | null | DQMOffline/L1Trigger/python/L1TEGammaOffline_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
]
| null | null | null | import FWCore.ParameterSet.Config as cms
electronEfficiencyThresholds = [36, 68, 128, 176]
electronEfficiencyBins = []
electronEfficiencyBins.extend(list(xrange(0, 120, 10)))
electronEfficiencyBins.extend(list(xrange(120, 180, 20)))
electronEfficiencyBins.extend(list(xrange(180, 300, 40)))
electronEfficiencyBins.extend(list(xrange(300, 400, 100)))
# just copy for now
photonEfficiencyThresholds = electronEfficiencyThresholds
photonEfficiencyBins = electronEfficiencyBins
l1tEGammaOfflineDQM = cms.EDAnalyzer(
"L1TEGammaOffline",
electronCollection=cms.InputTag("gedGsfElectrons"),
photonCollection=cms.InputTag("photons"),
caloJetCollection=cms.InputTag("ak4CaloJets"),
caloMETCollection=cms.InputTag("caloMet"),
conversionsCollection=cms.InputTag("allConversions"),
PVCollection=cms.InputTag("offlinePrimaryVerticesWithBS"),
beamSpotCollection=cms.InputTag("offlineBeamSpot"),
TriggerEvent=cms.InputTag('hltTriggerSummaryAOD', '', 'HLT'),
TriggerResults=cms.InputTag('TriggerResults', '', 'HLT'),
# last filter of HLTEle27WP80Sequence
TriggerFilter=cms.InputTag('hltEle27WP80TrackIsoFilter', '', 'HLT'),
TriggerPath=cms.string('HLT_Ele27_WP80_v13'),
stage2CaloLayer2EGammaSource=cms.InputTag("caloStage2Digis", "EGamma"),
histFolder=cms.string('L1T/L1TEGamma'),
electronEfficiencyThresholds=cms.vdouble(electronEfficiencyThresholds),
electronEfficiencyBins=cms.vdouble(electronEfficiencyBins),
photonEfficiencyThresholds=cms.vdouble(photonEfficiencyThresholds),
photonEfficiencyBins=cms.vdouble(photonEfficiencyBins),
)
l1tEGammaOfflineDQMEmu = l1tEGammaOfflineDQM.clone(
stage2CaloLayer2EGammaSource=cms.InputTag("simCaloStage2Digis"),
histFolder=cms.string('L1TEMU/L1TEGamma'),
)
| 37.208333 | 75 | 0.783875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.207167 |
9d092f6e945eea14883d51652329fcd4951dee46 | 18,548 | py | Python | ion_networks/numba_functions.py | swillems/ion_networks | 5304a92248ec007ac2253f246a3d44bdb58ae110 | [
"MIT"
]
| 2 | 2020-10-28T16:11:56.000Z | 2020-12-03T13:19:18.000Z | ion_networks/numba_functions.py | swillems/ion_networks | 5304a92248ec007ac2253f246a3d44bdb58ae110 | [
"MIT"
]
| null | null | null | ion_networks/numba_functions.py | swillems/ion_networks | 5304a92248ec007ac2253f246a3d44bdb58ae110 | [
"MIT"
]
| null | null | null | #!python
# external
import numpy as np
import numba
@numba.njit(nogil=True, cache=True)
def longest_increasing_subsequence(sequence):
    """Return indices of a longest non-decreasing subsequence of `sequence` (patience sorting with binary search, O(n log n))."""
M = np.zeros(len(sequence) + 1, np.int64)
P = np.zeros(len(sequence), np.int64)
max_subsequence_length = 0
for current_index, current_element in enumerate(sequence):
low_bound = 1
high_bound = max_subsequence_length
while low_bound <= high_bound:
mid = (low_bound + high_bound) // 2
if sequence[M[mid]] <= current_element:
low_bound = mid + 1
else:
high_bound = mid - 1
subsequence_length = low_bound
P[current_index] = M[subsequence_length - 1]
M[subsequence_length] = current_index
if subsequence_length > max_subsequence_length:
max_subsequence_length = subsequence_length
longest_increasing_subsequence = np.repeat(0, max_subsequence_length)
index = M[max_subsequence_length]
for current_index in range(max_subsequence_length - 1, -1, -1):
longest_increasing_subsequence[current_index] = index
index = P[index]
return longest_increasing_subsequence
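# Illustrative example (an addition, not part of the original module): for the
# sequence [3, 1, 2, 5, 4] the function returns the index array [1, 2, 4],
# i.e. the non-decreasing value subsequence [1, 2, 4].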
@numba.njit(nogil=True, cache=True)
def increase_buffer(buffer, max_batch=10**7):
new_buffer = np.empty(buffer.shape[0] + max_batch, np.int64)
new_buffer[:len(buffer)] = buffer
return new_buffer
@numba.njit(nogil=True, cache=True)
def quick_align(
self_mzs,
other_mzs,
self_mz_order,
other_mz_order,
other_rt_order,
ppm
):
    """Coarsely align two ion runs: for each `other` ion (in RT order) the
    candidate `self` ions within `ppm` m/z tolerance are collected, a consistent
    subset is selected via the longest increasing subsequence, and matched index
    anchors (padded with the first and last indices) are returned for both runs."""
max_mz_diff = 1 + ppm * 10**-6
low_limits = np.searchsorted(
self_mzs[self_mz_order],
other_mzs[other_mz_order] / max_mz_diff,
"left"
)[other_rt_order]
high_limits = np.searchsorted(
self_mzs[self_mz_order],
other_mzs[other_mz_order] * max_mz_diff,
"right"
)[other_rt_order]
diffs = high_limits - low_limits
ends = np.cumsum(diffs)
self_indices = np.empty(ends[-1], np.int64)
for l, h, e, d in zip(low_limits, high_limits, ends, diffs):
self_indices[e - d: e] = self_mz_order[l: h]
selection = longest_increasing_subsequence(self_indices)
self_indices_mask = np.empty(len(selection) + 2, np.int64)
self_indices_mask[0] = 0
self_indices_mask[1: -1] = self_indices[selection]
self_indices_mask[-1] = len(self_mzs) - 1
other_indices_mask = np.empty(len(selection) + 2, np.int64)
other_indices_mask[0] = 0
other_indices = np.repeat(
np.arange(len(other_rt_order)),
high_limits - low_limits
)
other_indices_mask[1: -1] = other_indices[selection]
other_indices_mask[-1] = len(other_mzs) - 1
return self_indices_mask, other_indices_mask
@numba.njit(nogil=True, cache=True)
def align_coordinates(
queries,
lower_limits,
upper_limits,
self_coordinates,
other_coordinates,
max_errors,
# kind="euclidean"
):
indptr = np.zeros(len(queries), np.int64)
indices = np.empty(10**7, np.int64)
total = 0
for index, query in enumerate(queries):
low_limit = lower_limits[query]
high_limit = upper_limits[query]
candidate_count = high_limit - low_limit
if candidate_count == 0:
continue
elif (candidate_count + total) >= len(indices):
indices = increase_buffer(indices)
dists = other_coordinates[low_limit: high_limit] - self_coordinates[query]
# TODO: what if error==0?
# if kind == "euclidean":
dists /= max_errors
dists = dists**2
projected_dists = np.sum(dists, axis=1)
projected_dists = np.sqrt(projected_dists)
candidates = low_limit + np.flatnonzero(projected_dists <= 1)
# elif kind == "manhattan":
# projected_dists = np.all(dists < max_errors, axis=1)
# candidates = low_limit + np.flatnonzero(projected_dists)
candidate_count = len(candidates)
indices[total: total + candidate_count] = candidates
indptr[index] = candidate_count
total += candidate_count
return (indptr, indices[:total])
@numba.njit(nogil=True, cache=True)
def make_symmetric(
indptr,
indices,
):
# TODO: multithread?
offsets = np.cumsum(np.bincount(indices))
indptr_ = indptr.copy()
indptr_[1:1 + offsets.shape[0]] += offsets
indptr_[1 + offsets.shape[0]:] += offsets[-1]
indices_ = np.empty(indptr_[-1], np.int64)
pointers_ = np.empty_like(indices_)
offsets = indptr_[:-1] + np.diff(indptr)
for index in range(indptr.shape[0] - 1):
start = indptr[index]
end = indptr[index + 1]
current_indices = indices[start: end]
pointers = np.arange(start, end)
start_ = indptr_[index]
end_ = start_ + current_indices.shape[0]
indices_[start_: end_] = current_indices
pointers_[start_: end_] = pointers
current_offsets = offsets[current_indices]
indices_[current_offsets] = index
pointers_[current_offsets] = pointers
offsets[current_indices] += 1
return indptr_, indices_, pointers_
@numba.njit(nogil=True, cache=True)
def align_edges(
queries,
self_indptr,
self_indices,
self_pointers,
other_indptr,
other_indices,
alignment,
alignment_mask,
):
self_pointers_ = np.empty(10**7, np.int64)
other_pointers_ = np.empty(10**7, np.int64)
pointer_offset = 0
for index in queries:
possible_start = self_indptr[index]
possible_end = self_indptr[index + 1]
if possible_start == possible_end:
continue
current_index = alignment[index]
current_start = other_indptr[current_index]
current_end = other_indptr[current_index + 1]
if current_start == current_end:
continue
possible_indices = self_indices[possible_start: possible_end]
possible_mask = alignment_mask[possible_indices]
if not np.any(possible_mask):
continue
possible_indices = alignment[possible_indices[possible_mask]]
possible_pointers = self_pointers[possible_start: possible_end][
possible_mask
]
current_indices = other_indices[current_start: current_end]
candidates1 = np.searchsorted(
current_indices,
possible_indices,
"left"
)
candidates2 = np.searchsorted(
current_indices,
possible_indices,
"right"
)
overlap = np.flatnonzero(candidates2 != candidates1)
overlap_count = len(overlap)
if len(overlap) == 0:
continue
elif (overlap_count + pointer_offset) >= len(self_pointers_):
self_pointers_ = increase_buffer(self_pointers_)
other_pointers_ = increase_buffer(other_pointers_)
self_pointers_[
pointer_offset: pointer_offset + overlap_count
] = possible_pointers[overlap]
other_pointers_[
pointer_offset: pointer_offset + overlap_count
] = current_start + candidates1[overlap]
pointer_offset += overlap_count
return self_pointers_[:pointer_offset], other_pointers_[:pointer_offset]
@numba.njit(cache=True)
def find_peak_indices(
input_array,
output_array,
max_distance,
):
peaks = np.zeros(int(input_array[-1]), np.int64)
current_max_mz = 0
current_max_int = 0
current_max_index = 0
for index, (intensity, mz) in enumerate(zip(output_array, input_array)):
if mz > current_max_mz + max_distance:
peaks[int(current_max_mz)] = current_max_index
current_max_mz = mz
current_max_int = intensity
current_max_index = index
elif intensity > current_max_int:
current_max_mz = mz
current_max_int = intensity
current_max_index = index
return peaks
@numba.njit(nogil=True, cache=True)
def get_unique_apex_and_count(
ordered_self_indices,
ordered_other_indices,
return_all_counts=True
):
counts = np.zeros_like(ordered_self_indices)
self_max = np.max(ordered_self_indices)
other_max = np.max(ordered_other_indices)
unique_pair = np.zeros(counts.shape[0], np.bool_)
self_frequencies = np.zeros(self_max + 1, np.int64)
other_frequencies = np.zeros(other_max + 1, np.int64)
self_indptr = np.empty(self_max + 2, np.int64)
self_indptr[0] = 0
self_indptr[1:] = np.cumsum(np.bincount(ordered_self_indices))
self_order = np.argsort(ordered_self_indices)
other_indptr = np.empty(other_max + 2, np.int64)
other_indptr[0] = 0
other_indptr[1:] = np.cumsum(np.bincount(ordered_other_indices))
other_order = np.argsort(ordered_other_indices)
unique_count = 0
max_count = 0
apex = 0
for i in range(counts.shape[0]):
self_index = ordered_self_indices[i]
other_index = ordered_other_indices[i]
if (
self_frequencies[self_index] == 0
) & (
other_frequencies[other_index] == 0
):
unique_count += 1
unique_pair[i] = True
if unique_count > max_count:
apex = i
max_count = unique_count
else:
self_locs = self_order[
self_indptr[self_index]: self_indptr[self_index + 1]
]
if np.any(unique_pair[self_locs]):
unique_count -= 1
other_locs = other_order[
other_indptr[other_index]: other_indptr[other_index + 1]
]
if np.any(unique_pair[other_locs]):
unique_count -= 1
unique_pair[self_locs] = False
unique_pair[other_locs] = False
self_frequencies[self_index] += 1
other_frequencies[other_index] += 1
counts[i] = unique_count
if not return_all_counts:
counts = counts[apex: apex + 1]
return apex, counts
@numba.njit
def cluster_network(
indptr,
indices,
edge_pointers,
selected_edges,
):
node_count = indptr.shape[0] - 1
clusters = np.zeros(node_count, np.int64)
cluster_number = 0
for index in range(node_count):
if clusters[index] != 0:
continue
current_cluster = set()
new_indices = set()
new_indices.add(index)
while len(new_indices) > 0:
new_index = new_indices.pop()
current_cluster.add(new_index)
neighbors = indices[indptr[new_index]: indptr[new_index + 1]]
pointers = edge_pointers[indptr[new_index]: indptr[new_index + 1]]
selected = selected_edges[pointers]
new_indices |= set(neighbors[selected]) - current_cluster
cluster_number += 1
for i in current_cluster:
clusters[i] = cluster_number
return clusters
@numba.njit()
def __get_candidate_peptide_indices_for_edges(
indptr,
indices,
low_peptide_indices,
high_peptide_indices,
database_peptides,
max_batch
):
# TODO: Docstring
result_indptr = np.empty(indptr[-1], np.int64)
result_indices = np.empty(max_batch, np.int64)
current_index = 0
for start, end, low, high in zip(
indptr[:-1],
indptr[1:],
low_peptide_indices,
high_peptide_indices,
):
if (low == high) or (start == end):
result_indptr[start:end] = current_index
continue
if (
(end - start) * (high - low) + current_index
) >= result_indices.shape[0]:
new_result_indices = np.empty(
max_batch + result_indices.shape[0],
np.int64
)
new_result_indices[:result_indices.shape[0]] = result_indices
result_indices = new_result_indices
peptide_candidates = database_peptides[low: high]
peptide_candidates_set = set(peptide_candidates)
neighbors = indices[start: end]
for i, neighbor in enumerate(neighbors):
neighbor_low = low_peptide_indices[neighbor]
neighbor_high = high_peptide_indices[neighbor]
if neighbor_low == neighbor_high:
result_indptr[start + i] = current_index
continue
neighbor_peptide_candidates = database_peptides[
neighbor_low: neighbor_high
]
for neighbor_peptide_candidate in neighbor_peptide_candidates:
if neighbor_peptide_candidate in peptide_candidates_set:
result_indices[
current_index
] = neighbor_peptide_candidate
current_index += 1
result_indptr[start + i] = current_index
result_indptr[1:] = result_indptr[:-1]
result_indptr[0] = 0
return result_indptr, result_indices[:current_index]
@numba.njit(cache=True, nogil=True)
def annotate_mgf(
queries,
spectra_indptr,
low_limits,
high_limits,
peptide_pointers,
min_score=0
):
peptide_count = np.max(peptide_pointers) + 1
count = 0
for s in queries:
count += spectra_indptr[s + 1] - spectra_indptr[s]
score_results = np.empty(count, np.float64)
fragment_results = np.empty(count, np.int64)
index_results = np.empty(count, np.int64)
count_results = np.empty(count, np.int64)
candidate_counts = np.empty(count, np.int64)
spectrum_sizes = np.empty(count, np.int64)
current_i = 0
candidates = np.empty(peptide_count, np.int64)
for spectrum_index in queries:
spectrum_start = spectra_indptr[spectrum_index]
spectrum_end = spectra_indptr[spectrum_index + 1]
spectrum_size = spectrum_end - spectrum_start
if spectrum_size == 0:
continue
candidates[:] = 0
for ion_index in range(spectrum_start, spectrum_end):
peptide_low = low_limits[ion_index]
peptide_high = high_limits[ion_index]
if peptide_low == peptide_high:
continue
peptides = peptide_pointers[peptide_low: peptide_high]
candidates[peptides] += 1
for ion_index in range(spectrum_start, spectrum_end):
peptide_low = low_limits[ion_index]
peptide_high = high_limits[ion_index]
if peptide_low == peptide_high:
continue
(
score,
max_count,
max_fragment,
candidate_count
) = score_regression_estimator(
candidates[peptide_pointers[peptide_low: peptide_high]],
peptide_low,
peptide_count
)
if score > min_score:
score_results[current_i] = score
fragment_results[current_i] = max_fragment
index_results[current_i] = ion_index
count_results[current_i] = max_count
candidate_counts[current_i] = candidate_count
spectrum_sizes[current_i] = spectrum_size
current_i += 1
return (
score_results[:current_i],
fragment_results[:current_i],
index_results[:current_i],
count_results[:current_i],
candidate_counts[:current_i],
spectrum_sizes[:current_i],
)
@numba.njit(cache=True, nogil=True)
def annotate_network(
queries,
indptr,
indices,
edge_pointers,
selected_edges,
low_limits,
high_limits,
peptide_pointers,
):
peptide_count = np.max(peptide_pointers) + 1
count = len(queries)
score_results = np.empty(count, np.float64)
fragment_results = np.empty(count, np.int64)
index_results = np.empty(count, np.int64)
count_results = np.empty(count, np.int64)
candidate_counts = np.empty(count, np.int64)
neighbor_counts = np.empty(count, np.int64)
current_i = 0
for ion_index in queries:
peptide_low = low_limits[ion_index]
peptide_high = high_limits[ion_index]
if peptide_low == peptide_high:
continue
ion_start = indptr[ion_index]
ion_end = indptr[ion_index + 1]
good_neighbors = selected_edges[edge_pointers[ion_start: ion_end]]
neighbor_count = np.sum(good_neighbors)
if neighbor_count == 0:
continue
neighbors = indices[ion_start: ion_end][good_neighbors]
candidates = np.zeros(peptide_count, np.int64)
for neighbor_ion_index in neighbors:
neighbor_peptide_low = low_limits[neighbor_ion_index]
neighbor_peptide_high = high_limits[neighbor_ion_index]
if neighbor_peptide_low == neighbor_peptide_high:
continue
peptides = peptide_pointers[
neighbor_peptide_low: neighbor_peptide_high
]
candidates[peptides] += 1
(
score,
max_count,
max_fragment,
candidate_count
) = score_regression_estimator(
candidates[peptide_pointers[peptide_low: peptide_high]] + 1,
peptide_low,
peptide_count
)
if score > 0:
score_results[current_i] = score
fragment_results[current_i] = max_fragment
index_results[current_i] = ion_index
count_results[current_i] = max_count
candidate_counts[current_i] = candidate_count
neighbor_counts[current_i] = neighbor_count
current_i += 1
return (
score_results[:current_i],
fragment_results[:current_i],
index_results[:current_i],
count_results[:current_i],
candidate_counts[:current_i],
neighbor_counts[:current_i],
)
@numba.njit(cache=True, nogil=True)
def score_regression_estimator(candidates, offset, peptide_count):
frequencies = np.bincount(candidates)
frequencies = np.cumsum(frequencies[::-1])[::-1]
max_count = len(frequencies) - 1
max_fragment = offset + np.flatnonzero(candidates == max_count)[0]
if frequencies[-1] != 1:
score = 0
elif frequencies[1] == 1:
# score = 1 - 2**(-np.log2(peptide_count) * (max_count - 1))
score = 1 - peptide_count**(1 - max_count)
else:
x0 = 2 + np.flatnonzero(frequencies[2:] == 1)[0]
y0 = np.log2(frequencies[1])
slope = y0 / (x0 - 1)
score = 1 - 2**(-slope * (max_count - x0))
return score, max_count, max_fragment, len(candidates)
| 34.864662 | 82 | 0.630418 | 0 | 0 | 0 | 0 | 18,456 | 0.99504 | 0 | 0 | 389 | 0.020973 |
9d099c325b8e8eb13555bc61afea2a208b9050c9 | 241 | py | Python | Programming Fundamentals/Dictionaries/bakery.py | antonarnaudov/SoftUniProjects | 01cbdce2b350b57240045d1bc3e21d34f9d0351d | [
"MIT"
]
| null | null | null | Programming Fundamentals/Dictionaries/bakery.py | antonarnaudov/SoftUniProjects | 01cbdce2b350b57240045d1bc3e21d34f9d0351d | [
"MIT"
]
| null | null | null | Programming Fundamentals/Dictionaries/bakery.py | antonarnaudov/SoftUniProjects | 01cbdce2b350b57240045d1bc3e21d34f9d0351d | [
"MIT"
]
| null | null | null | def result(elements):
bakery = {}
for i in range(0, len(elements), 2):
key = elements[i]
value = elements[i + 1]
bakery[key] = int(value)
return bakery
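# Illustrative run (an addition, not part of the original exercise): for the
# input line "bread 5 cake 3", result() returns {'bread': 5, 'cake': 3}, which
# the print() below then displays.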
tokens = input().split(' ')
print(result(tokens)) | 18.538462 | 40 | 0.564315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.012448 |
9d0ab807d87d356a4a4fb529654e22486400f676 | 1,525 | py | Python | vtrace/const.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
]
| 716 | 2015-01-01T14:41:11.000Z | 2022-03-28T06:51:50.000Z | vtrace/const.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
]
| 266 | 2015-01-01T15:07:27.000Z | 2022-03-30T15:19:26.000Z | vtrace/const.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
]
| 159 | 2015-01-01T16:19:44.000Z | 2022-03-21T21:55:34.000Z | # Order must match format junk
# NOTIFY_ALL is kinda special, if you registerNotifier
# with it, you get ALL notifications.
NOTIFY_ALL = 0 # Get all notifications
NOTIFY_SIGNAL = 1 # Callback on signal/exception
NOTIFY_BREAK = 2 # Callback on breakpoint / sigtrap
NOTIFY_STEP = 3 # Callback on singlestep complete
NOTIFY_SYSCALL = 4 # Callback on syscall (linux only for now)
NOTIFY_CONTINUE = 5 # Callback on continue (not done for step)
NOTIFY_EXIT = 6 # Callback on process exit
NOTIFY_ATTACH = 7 # Callback on successful attach
NOTIFY_DETACH = 8 # Callback on impending process detach
# The following notifiers are *only* available on some platforms
# (and may be kinda faked out ala library load events on posix)
NOTIFY_LOAD_LIBRARY = 9
NOTIFY_UNLOAD_LIBRARY = 10
NOTIFY_CREATE_THREAD = 11
NOTIFY_EXIT_THREAD = 12
NOTIFY_DEBUG_PRINT = 13 # Some platforms support this (win32).
NOTIFY_MAX = 20
# File Descriptor / Handle Types
FD_UNKNOWN = 0 # Unknown or we don't have a type for it
FD_FILE = 1
FD_SOCKET = 2
FD_PIPE = 3
FD_LOCK = 4 # Win32 Mutant/Lock/Semaphore
FD_EVENT = 5 # Win32 Event/KeyedEvent
FD_THREAD = 6 # Win32 Thread
FD_REGKEY = 7 # Win32 Registry Key
# Vtrace Symbol Types
SYM_MISC = -1
SYM_GLOBAL = 0 # Global (mostly vars)
SYM_LOCAL = 1 # Locals
SYM_FUNCTION = 2 # Functions
SYM_SECTION = 3 # Binary section
SYM_META = 4 # Info that we enumerate
# Vtrace Symbol Offsets
VSYM_NAME = 0
VSYM_ADDR = 1
VSYM_SIZE = 2
VSYM_TYPE = 3
VSYM_FILE = 4
| 33.152174 | 66 | 0.733115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 869 | 0.569836 |
9d0d12599f8d63386d38681b6e12a10636886357 | 3,248 | py | Python | src/ezdxf/groupby.py | jkjt/ezdxf | 2acc5611b81476ea16b98063b9f55446a9182b81 | [
"MIT"
]
| 515 | 2017-01-25T05:46:52.000Z | 2022-03-29T09:52:27.000Z | src/ezdxf/groupby.py | jkjt/ezdxf | 2acc5611b81476ea16b98063b9f55446a9182b81 | [
"MIT"
]
| 417 | 2017-01-25T10:01:17.000Z | 2022-03-29T09:22:04.000Z | src/ezdxf/groupby.py | jkjt/ezdxf | 2acc5611b81476ea16b98063b9f55446a9182b81 | [
"MIT"
]
| 149 | 2017-02-01T15:52:02.000Z | 2022-03-17T10:33:38.000Z | # Purpose: Grouping entities by DXF attributes or a key function.
# Copyright (c) 2017-2021, Manfred Moitzi
# License: MIT License
from typing import Iterable, Hashable, Dict, List, TYPE_CHECKING
from ezdxf.lldxf.const import DXFValueError, DXFAttributeError
if TYPE_CHECKING:
from ezdxf.eztypes import DXFEntity, KeyFunc
def groupby(
entities: Iterable["DXFEntity"], dxfattrib: str = "", key: "KeyFunc" = None
) -> Dict[Hashable, List["DXFEntity"]]:
"""
Groups a sequence of DXF entities by a DXF attribute like ``'layer'``,
returns a dict with `dxfattrib` values as key and a list of entities
matching this `dxfattrib`.
A `key` function can be used to combine some DXF attributes (e.g. layer and
color) and should return a hashable data type like a tuple of strings,
integers or floats, `key` function example::
def group_key(entity: DXFEntity):
return entity.dxf.layer, entity.dxf.color
For not suitable DXF entities return ``None`` to exclude this entity, in
this case it's not required, because :func:`groupby` catches
:class:`DXFAttributeError` exceptions to exclude entities, which do not
provide layer and/or color attributes, automatically.
Result dict for `dxfattrib` = ``'layer'`` may look like this::
{
'0': [ ... list of entities ],
'ExampleLayer1': [ ... ],
'ExampleLayer2': [ ... ],
...
}
Result dict for `key` = `group_key`, which returns a ``(layer, color)``
tuple, may look like this::
{
('0', 1): [ ... list of entities ],
('0', 3): [ ... ],
('0', 7): [ ... ],
('ExampleLayer1', 1): [ ... ],
('ExampleLayer1', 2): [ ... ],
('ExampleLayer1', 5): [ ... ],
('ExampleLayer2', 7): [ ... ],
...
}
All entity containers (modelspace, paperspace layouts and blocks) and the
:class:`~ezdxf.query.EntityQuery` object have a dedicated :meth:`groupby`
method.
Args:
entities: sequence of DXF entities to group by a DXF attribute or a
`key` function
dxfattrib: grouping DXF attribute like ``'layer'``
key: key function, which accepts a :class:`DXFEntity` as argument and
returns a hashable grouping key or ``None`` to ignore this entity
"""
if all((dxfattrib, key)):
raise DXFValueError(
"Specify a dxfattrib or a key function, but not both."
)
if dxfattrib != "":
key = lambda entity: entity.dxf.get_default(dxfattrib)
if key is None:
raise DXFValueError(
"no valid argument found, specify a dxfattrib or a key function, "
"but not both."
)
result: Dict[Hashable, List["DXFEntity"]] = dict()
for dxf_entity in entities:
if not dxf_entity.is_alive:
continue
try:
group_key = key(dxf_entity)
except DXFAttributeError:
# ignore DXF entities, which do not support all query attributes
continue
if group_key is not None:
group = result.setdefault(group_key, [])
group.append(dxf_entity)
return result
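# Usage sketch (msp being any iterable of DXF entities, e.g. a modelspace):
#   groups = groupby(msp, dxfattrib="layer")
#   for layer, entities in groups.items():
#       ...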
| 35.692308 | 79 | 0.601293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,292 | 0.705665 |
9d0e38af685d991cde1a6a41f4c243ad673af7b8 | 1,839 | py | Python | tests/test_basic.py | nk412/companycase | 5b93478a79293a4bc93112b805eff56c44756f18 | [
"MIT"
]
| 7 | 2016-09-08T15:25:33.000Z | 2022-02-01T13:21:40.000Z | tests/test_basic.py | nk412/companycase | 5b93478a79293a4bc93112b805eff56c44756f18 | [
"MIT"
]
| 1 | 2016-07-12T10:36:02.000Z | 2016-07-12T10:36:02.000Z | tests/test_basic.py | nk412/companycase | 5b93478a79293a4bc93112b805eff56c44756f18 | [
"MIT"
]
| 2 | 2016-09-17T17:41:28.000Z | 2020-02-29T22:58:09.000Z | # coding=utf-8
import unittest
from companycase import CompanyCase
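# Note: the optional second argument to CompanyCase.apply() acts as an
# uppercasing threshold; judging by the tests below, 0.0 keeps unknown words in
# title case while 1.0 uppercases them, and forced-case words and short stop
# words are unaffected.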
class TestEnglishCCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ccase = CompanyCase()
def test_simple(self):
self.assertEqual(self.ccase.apply("foobar ltd"), "Foobar LTD")
self.assertEqual(self.ccase.apply("hsbc bank (uk) ltd"), "HSBC Bank (UK) LTD")
self.assertEqual(self.ccase.apply("AXA INSURANCE"), "AXA Insurance")
def test_abbreviations(self):
s = 'ltd plc llc and of'
expected = 'LTD PLC LLC and of'
self.assertEqual(self.ccase.apply(s, 0.0), expected)
self.assertEqual(self.ccase.apply(s, 1.0), expected)
def test_threshold(self):
s = 'hello there nk'
self.assertEqual(self.ccase.apply(s), "Hello There NK")
self.assertEqual(self.ccase.apply(s, 0.0), "Hello There Nk")
self.assertEqual(self.ccase.apply(s, 1.0), "HELLO THERE NK")
def test_force_case(self):
self.ccase.force_case_for_words(['fOO', 'bAr'])
s = 'foo bar limited plc ltd'
self.assertEqual(self.ccase.apply(s), "fOO bAr Limited PLC LTD")
self.assertEqual(self.ccase.apply(s, 0.0), "fOO bAr Limited PLC LTD")
self.assertEqual(self.ccase.apply(s, 1.0), "fOO bAr LIMITED PLC LTD")
def test_unicode(self):
self.assertEqual(self.ccase.apply(u"foobar ltd"), "Foobar LTD")
self.assertEqual(self.ccase.apply(u"hsbc bank (uk) ltd"), "HSBC Bank (UK) LTD")
self.assertEqual(self.ccase.apply(u"AXA INSURANCE"), "AXA Insurance")
self.assertEqual(self.ccase.apply(u"tromsø, arctic explorers plc"), u'Tromsø, Arctic Explorers PLC')
self.assertEqual(self.ccase.apply(u"tromsø, arctic explorers plc", 1.0), u'TROMSØ, ARCTIC EXPLORERS PLC')
if __name__ == '__main__':
unittest.main()
| 39.12766 | 113 | 0.659598 | 1,724 | 0.935431 | 0 | 0 | 71 | 0.038524 | 0 | 0 | 557 | 0.302225 |
9d0eed15b3c0630d157c26b0aac4e458a282e19f | 8,527 | py | Python | main_single.py | wang-chen/AirLoop | 12fb442c911002427a51f00d43f747ef593bd186 | [
"BSD-3-Clause"
]
| 39 | 2021-09-28T19:48:13.000Z | 2022-03-17T06:44:19.000Z | main_single.py | wang-chen/AirLoop | 12fb442c911002427a51f00d43f747ef593bd186 | [
"BSD-3-Clause"
]
| null | null | null | main_single.py | wang-chen/AirLoop | 12fb442c911002427a51f00d43f747ef593bd186 | [
"BSD-3-Clause"
]
| 3 | 2021-10-04T01:26:17.000Z | 2022-02-12T04:48:50.000Z | #!/usr/bin/env python3
import os
import tqdm
import torch
import random
import numpy as np
import torch.nn as nn
import configargparse
import torch.optim as optim
from tensorboard import program
from torch.utils.tensorboard import SummaryWriter
import yaml
from models import FeatureNet
from datasets import get_dataset
from losses import MemReplayLoss
from utils.evaluation import RecognitionEvaluator
from utils.misc import save_model, load_model, GlobalStepCounter, ProgressBarDescription
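# Evaluation: run the frozen network over the loader, collect the global
# descriptors and hand them to the RecognitionEvaluator for reporting.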
@torch.no_grad()
def evaluate(net, loader, counter, args, writer=None):
net.eval()
evaluator = RecognitionEvaluator(loader=loader, args=args)
for images, aux, env_seq in tqdm.tqdm(loader):
images = images.to(args.device)
gd = net(images)
evaluator.observe(gd, aux, images, env_seq)
evaluator.report()
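# Training: one memory-replay loss step per batch; for environment-incremental
# ("seq") tasks the model (and, if enabled, the lifelong-loss state) is
# checkpointed whenever the environment changes.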
def train(model, loader, optimizer, counter, args, writer=None):
model.train()
if 'train' in args.task:
criterion = MemReplayLoss(writer=writer, viz_start=args.viz_start, viz_freq=args.viz_freq, counter=counter, args=args)
last_env = None
for epoch in range(args.epoch):
enumerator = tqdm.tqdm(loader)
pbd = ProgressBarDescription(enumerator)
for images, aux, env_seq in enumerator:
images = images.to(args.device)
loss = criterion(model, images, aux, env_seq[0])
# in case loss is manually set to 0 to skip batches
if loss.requires_grad and not loss.isnan():
loss.backward()
optimizer.step(closure=criterion.ll_loss)
optimizer.zero_grad()
# save model on env change for env-incremental tasks
if 'seq' in args.task and last_env != env_seq[0][0]:
if last_env is not None:
save_model(model, '%s.%s' % (args.save, last_env))
last_env = env_seq[0][0]
if (args.save_freq is not None and counter.steps % args.save_freq == 0) \
or (args.save_steps is not None and counter.steps in args.save_steps):
save_model(model, '%s.step%d' % (args.save, counter.steps))
pbd.update(loss)
counter.step()
if 'seq' in args.task:
if args.save is not None:
save_model(model, '%s.%s' % (args.save, last_env))
if args.ll_method is not None:
criterion.ll_loss.save(task=last_env)
else:
save_model(model, '%s.epoch%d' % (args.save, epoch))
def main(args):
if args.deterministic >= 1:
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.deterministic >= 2:
torch.backends.cudnn.benchmark = False
if args.deterministic >= 3:
torch.set_deterministic(True)
loader = get_dataset(args)
if args.devices is None:
args.devices = ['cuda:%d' % i for i in range(torch.cuda.device_count())] if torch.cuda.is_available() else ['cpu']
args.device = args.devices[0]
model = FeatureNet(args.gd_dim).to(args.device)
if args.load:
load_model(model, args.load, device=args.device)
if not args.no_parallel:
model = nn.DataParallel(model, device_ids=args.devices)
writer = None
if args.log_dir is not None:
log_dir = args.log_dir
        # append a timestamp so repeated runs into the same logdir do not overwrite each other
if os.path.exists(log_dir) and os.path.isdir(log_dir):
from datetime import datetime
log_dir = os.path.join(log_dir, datetime.now().strftime('%b%d_%H-%M-%S'))
writer = SummaryWriter(log_dir)
tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', log_dir, '--bind_all', '--samples_per_plugin=images=50'])
print(('TensorBoard at %s \n' % tb.launch()))
step_counter = GlobalStepCounter(initial_step=1)
if 'train' in args.task:
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.w_decay)
train(model, loader, optimizer, step_counter, args, writer)
if 'eval' in args.task:
evaluate(model, loader, step_counter, args, writer)
def run(args=None):
# Arguements
parser = configargparse.ArgumentParser(description='Feature Graph Networks', default_config_files=['./config/config.yaml'])
# general
parser.add_argument("--config", is_config_file=True, help="Config file path")
parser.add_argument("--task", type=str, choices=['train-seq', 'train-joint', 'eval'], default='train-seq', help="Task to perform")
parser.add_argument("--catalog-dir", type=str, default='./.cache/catalog', help='Processed dataset catalog')
parser.add_argument("--no-parallel", action='store_true', help="DataParallel")
parser.add_argument("--devices", type=str, nargs='+', default=None, help="Available devices")
parser.add_argument("--deterministic", type=int, default=3, help='Level of determinism.')
parser.add_argument("--seed", type=int, default=0, help='Random seed.')
parser.add_argument("--ll-config", type=str, help='Config file for lifelong losses')
parser.add_argument("--print-configs", action='store_true', help='Print parsed configs to console')
# dataset
parser.add_argument("--dataset-root", type=str, default='/data/datasets/', help="Home for all datasets")
parser.add_argument("--dataset", type=str, choices=['tartanair', 'nordland', 'robotcar'], default='tartanair', help="Dataset to use")
parser.add_argument("--include", type=str, default=None, help="Regex for sequences to include")
parser.add_argument("--exclude", type=str, default=None, help="Regex for sequences to exclude")
parser.add_argument('--scale', type=float, default=0.5, help='Image scale')
parser.add_argument("--num-workers", type=int, default=4, help="Number of workers in dataloader")
# model
parser.add_argument("--gd-dim", type=int, default=1024, help="Global descriptor dimension")
# training
parser.add_argument("--load", type=str, default=None, help="load pretrained model")
parser.add_argument("--save", type=str, default=None, help="Model save path")
parser.add_argument("--save-freq", type=int, help="Model saving frequency")
parser.add_argument("--save-steps", type=int, nargs="+", help="Specific steps to save model")
parser.add_argument("--ll-method", type=str, help="Lifelong learning method")
parser.add_argument("--ll-weight-dir", type=str, default=None, help="Load directory for regularization weights")
parser.add_argument("--ll-weight-load", type=str, nargs='+', help="Environment names for regularization weights")
parser.add_argument("--ll-strength", type=float, nargs='+', help="Weights of lifelong losses")
parser.add_argument("--batch-size", type=int, default=8, help="Minibatch size")
parser.add_argument("--lr", type=float, default=2e-3, help="Learning rate")
parser.add_argument("--w-decay", type=float, default=0, help="Weight decay of optim")
parser.add_argument("--epoch", type=int, default=15, help="Number of epoches")
parser.add_argument("--mem-size", type=int, default=1000, help="Memory size")
parser.add_argument("--log-dir", type=str, default=None, help="Tensorboard Log dir")
parser.add_argument("--viz-start", type=int, default=np.inf, help='Visualize starting from iteration')
parser.add_argument("--viz-freq", type=int, default=1, help='Visualize every * iteration(s)')
# evaluation
parser.add_argument("--eval-split-seed", type=int, default=42, help='Seed for splitting the dataset')
parser.add_argument("--eval-percentage", type=float, default=0.2, help='Percentage of sequences for eval')
parser.add_argument("--eval-save", type=str, help='Raw evaluation result save path')
parser.add_argument("--eval-desc-save", type=str, help='Generated global descriptor save path')
parser.add_argument("--eval-gt-dir", type=str, help='Evaluation groundtruth save directory')
parserd_args = parser.parse_args(args)
# domain specific configs
if parserd_args.ll_config is not None and parserd_args.ll_method is not None:
with open(parserd_args.ll_config, 'r') as f:
for k, v in yaml.safe_load(f)[parserd_args.ll_method].items():
setattr(parserd_args, k.replace('-', '_'), v)
if parserd_args.print_configs:
print("Training config:", parserd_args)
main(parserd_args)
if __name__ == "__main__":
run()
| 45.844086 | 137 | 0.673273 | 0 | 0 | 0 | 0 | 345 | 0.04046 | 0 | 0 | 2,094 | 0.245573 |
9d0fc4d37e8008ce4ffedc8ff1748729bd11a8f1 | 271 | py | Python | skilletlib/skillet/__init__.py | annabarone/skilletlib | d1298218a1a0be35eb9fac2ae79323df600d8900 | [
"Apache-2.0"
]
| 6 | 2020-04-27T18:08:02.000Z | 2022-01-14T13:27:19.000Z | skilletlib/skillet/__init__.py | annabarone/skilletlib | d1298218a1a0be35eb9fac2ae79323df600d8900 | [
"Apache-2.0"
]
| 85 | 2019-10-28T19:13:55.000Z | 2021-07-14T13:00:28.000Z | skilletlib/skillet/__init__.py | annabarone/skilletlib | d1298218a1a0be35eb9fac2ae79323df600d8900 | [
"Apache-2.0"
]
| 7 | 2019-12-05T20:17:16.000Z | 2021-12-09T01:16:58.000Z | # from .panos import PanosSkillet
# from .docker import DockerSkillet
# from .pan_validation import PanValidationSkillet
# from .python3 import Python3Skillet
# from .rest import RestSkillet
# from .template import TemplateSkillet
# from .workflow import WorkflowSkillet
| 33.875 | 50 | 0.819188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.97417 |
9d10f233df729f37438c93bc6d49f9504b03d459 | 1,192 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/rss_proxy/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
]
| 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/rss_proxy/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
]
| null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/rss_proxy/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
]
| 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Views for the rss_proxy djangoapp.
"""
import requests
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseNotFound
from lms.djangoapps.rss_proxy.models import WhitelistedRssUrl
CACHE_KEY_RSS = "rss_proxy.{url}"
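# Responses are cached per URL; only whitelisted URLs are ever fetched upstream.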
def proxy(request):
"""
Proxy requests for the given RSS url if it has been whitelisted.
"""
url = request.GET.get('url')
if url and WhitelistedRssUrl.objects.filter(url=url).exists():
# Check cache for RSS if the given url is whitelisted
cache_key = CACHE_KEY_RSS.format(url=url)
status_code = 200
rss = cache.get(cache_key, '')
print(cache_key)
print('Cached rss: %s' % rss)
if not rss:
# Go get the RSS from the URL if it was not cached
resp = requests.get(url)
status_code = resp.status_code
if status_code == 200:
# Cache RSS
rss = resp.content
cache.set(cache_key, rss, settings.RSS_PROXY_CACHE_TIMEOUT)
return HttpResponse(rss, status=status_code, content_type='application/xml')
return HttpResponseNotFound()
| 29.8 | 84 | 0.653523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.245805 |
9d1115c99ef6af6ee80e12df2bf5eac7ff811ea7 | 149 | py | Python | CorePythonProg/ch02/0206.py | mallius/CppPrimer | 0285fabe5934492dfed0a9cf67ba5650982a5f76 | [
"MIT"
]
| null | null | null | CorePythonProg/ch02/0206.py | mallius/CppPrimer | 0285fabe5934492dfed0a9cf67ba5650982a5f76 | [
"MIT"
]
| null | null | null | CorePythonProg/ch02/0206.py | mallius/CppPrimer | 0285fabe5934492dfed0a9cf67ba5650982a5f76 | [
"MIT"
]
| 1 | 2022-01-25T15:51:34.000Z | 2022-01-25T15:51:34.000Z | #!/usr/bin/env python
numTemp = raw_input('Enter a number: ')
num = int(numTemp)
if num > 0:
print '>0'
elif num == 0:
print '0'
else:
print '<0'
| 13.545455 | 39 | 0.61745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.33557 |
9d123f052b89aece17eb457b8ad9cafa6d71e501 | 314 | py | Python | bootcamp/accounts/urls.py | elbakouchi/bootcamp | 2c7a0cd2ddb7632acb3009f94d728792ddf9644f | [
"MIT"
]
| null | null | null | bootcamp/accounts/urls.py | elbakouchi/bootcamp | 2c7a0cd2ddb7632acb3009f94d728792ddf9644f | [
"MIT"
]
| null | null | null | bootcamp/accounts/urls.py | elbakouchi/bootcamp | 2c7a0cd2ddb7632acb3009f94d728792ddf9644f | [
"MIT"
]
| null | null | null | from django.conf.urls import url
from .views import *
app_name = "accounts"
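# Signup and logout use the custom views imported above; the final catch-all
# pattern maps a username to that user's profile page.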
urlpatterns = [
url(r"^signup/$", CustomSignupView.as_view(), name="custom_signup"),
url(r"^destroy/$", AjaxLogoutView.as_view(), name="destroy"),
url(r"^(?P<username>[\w.@+-]+)/$", ProfileView.as_view(), name="profile"),
]
| 28.545455 | 78 | 0.652866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.308917 |
9d1277aded11ab70c99a610d14fb0758ed951638 | 8,195 | py | Python | utils/mininet/mininet_builder.py | jstavr/SDN_Project | 9fe5a65f46eadf15e1da43d9f8125b8c15161bbd | [
"Apache-2.0"
]
| null | null | null | utils/mininet/mininet_builder.py | jstavr/SDN_Project | 9fe5a65f46eadf15e1da43d9f8125b8c15161bbd | [
"Apache-2.0"
]
| null | null | null | utils/mininet/mininet_builder.py | jstavr/SDN_Project | 9fe5a65f46eadf15e1da43d9f8125b8c15161bbd | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
'''
Description: Load topology in Mininet
Author: James Hongyi Zeng (hyzeng_at_stanford.edu)
'''
from argparse import ArgumentParser
from socket import gethostbyname
from os import getuid
from mininet.log import lg, info
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.link import Link, Intf
from mininet.node import Host, OVSKernelSwitch, Controller, RemoteController
class StanfordTopo( Topo ):
"Topology for Stanford backbone"
PORT_ID_MULTIPLIER = 1
INTERMEDIATE_PORT_TYPE_CONST = 1
OUTPUT_PORT_TYPE_CONST = 2
PORT_TYPE_MULTIPLIER = 10000
SWITCH_ID_MULTIPLIER = 100000
DUMMY_SWITCH_BASE = 1000
PORT_MAP_FILENAME = "data/port_map.txt"
TOPO_FILENAME = "data/backbone_topology.tf"
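    # A flat port id encodes both switch and port:
    #   dpid = flat_id / SWITCH_ID_MULTIPLIER, port = flat_id % PORT_TYPE_MULTIPLIER
    # (see load_ports/load_topology below).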
dummy_switches = set()
def __init__( self ):
# Read topology info
ports = self.load_ports(self.PORT_MAP_FILENAME)
links = self.load_topology(self.TOPO_FILENAME)
switches = ports.keys()
# Add default members to class.
super( StanfordTopo, self ).__init__()
# Create switch nodes
for s in switches:
self.add_switch( "s%s" % s )
# Wire up switches
self.create_links(links, ports)
# Wire up hosts
host_id = len(switches) + 1
for s in switches:
# Edge ports
for port in ports[s]:
self.add_host( "h%s" % host_id )
self.add_link( "h%s" % host_id, "s%s" % s, 0, port )
host_id += 1
# Consider all switches and hosts 'on'
# self.enable_all()
def load_ports(self, filename):
ports = {}
f = open(filename, 'r')
for line in f:
if not line.startswith("$") and line != "":
tokens = line.strip().split(":")
port_flat = int(tokens[1])
dpid = port_flat / self.SWITCH_ID_MULTIPLIER
port = port_flat % self.PORT_TYPE_MULTIPLIER
if dpid not in ports.keys():
ports[dpid] = set()
if port not in ports[dpid]:
ports[dpid].add(port)
f.close()
return ports
def load_topology(self, filename):
links = set()
f = open(filename, 'r')
for line in f:
if line.startswith("link"):
tokens = line.split('$')
src_port_flat = int(tokens[1].strip('[]').split(', ')[0])
dst_port_flat = int(tokens[7].strip('[]').split(', ')[0])
links.add((src_port_flat, dst_port_flat))
f.close()
return links
def create_links(self, links, ports):
'''Generate dummy switches
For example, interface A1 connects to B1 and C1 at the same time. Since
Mininet uses veth, which supports point to point communication only,
we need to manually create dummy switches
@param links link info from the file
@param ports port info from the file
'''
# First pass, find special ports with more than 1 peer port
first_pass = {}
for (src_port_flat, dst_port_flat) in links:
src_dpid = src_port_flat / self.SWITCH_ID_MULTIPLIER
dst_dpid = dst_port_flat / self.SWITCH_ID_MULTIPLIER
src_port = src_port_flat % self.PORT_TYPE_MULTIPLIER
dst_port = dst_port_flat % self.PORT_TYPE_MULTIPLIER
if (src_dpid, src_port) not in first_pass.keys():
first_pass[(src_dpid, src_port)] = set()
first_pass[(src_dpid, src_port)].add((dst_dpid, dst_port))
if (dst_dpid, dst_port) not in first_pass.keys():
first_pass[(dst_dpid, dst_port)] = set()
first_pass[(dst_dpid, dst_port)].add((src_dpid, src_port))
# Second pass, create new links for those special ports
dummy_switch_id = self.DUMMY_SWITCH_BASE
for (dpid, port) in first_pass.keys():
# Special ports!
if(len(first_pass[(dpid,port)])>1):
self.add_switch( "s%s" % dummy_switch_id )
self.dummy_switches.add(dummy_switch_id)
self.add_link( node1="s%s" % dpid, node2="s%s" % dummy_switch_id, port1=port, port2=1 )
dummy_switch_port = 2
for (dst_dpid, dst_port) in first_pass[(dpid,port)]:
first_pass[(dst_dpid, dst_port)].discard((dpid,port))
self.add_link( node1="s%s" % dummy_switch_id, node2="s%s" % dst_dpid, port1=dummy_switch_port, port2=dst_port)
ports[dst_dpid].discard(dst_port)
dummy_switch_port += 1
dummy_switch_id += 1
first_pass[(dpid,port)] = set()
ports[dpid].discard(port)
# Third pass, create the remaining links
for (dpid, port) in first_pass.keys():
for (dst_dpid, dst_port) in first_pass[(dpid,port)]:
self.add_link( node1="s%s" % dpid, node2="s%s" % dst_dpid, port1=port, port2=dst_port )
ports[dst_dpid].discard(dst_port)
ports[dpid].discard(port)
class StanfordMininet ( Mininet ):
def build( self ):
super( StanfordMininet, self ).build()
# FIXME: One exception... Dual links between yoza and yozb
# Need _manual_ modification for different topology files!!!
self.topo.add_link( node1="s%s" % 15, node2="s%s" % 16, port1=7, port2=4 )
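# Bring up the topology with two controllers: the main remote controller drives
# the real switches, the dummy switches created to fan out multi-peer links are
# re-attached to a secondary controller, and STP is enabled on every bridge to
# break the resulting loops.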
def StanfordTopoTest( controller_ip, controller_port, dummy_controller_ip, dummy_controller_port ):
topo = StanfordTopo()
main_controller = lambda a: RemoteController( a, ip=controller_ip, port=controller_port)
net = StanfordMininet( topo=topo, switch=OVSKernelSwitch, controller=main_controller)
net.start()
# These switches should be set to a local controller..
dummy_switches = topo.dummy_switches
dummyClass = lambda a: RemoteController( a, ip=dummy_controller_ip, port=dummy_controller_port)
dummy_controller = net.addController( name='dummy_controller', controller=dummyClass)
dummy_controller.start()
for dpid in dummy_switches:
switch = net.nameToNode["s%s" % dpid]
switch.pause()
switch.start( [dummy_controller] )
# Turn on STP
for switchName in topo.switches():
switch = net.nameToNode[switchName]
cmd = "ovs-vsctl set Bridge %s stp_enable=true" % switch.name
switch.cmd(cmd)
switch.cmd('ovs-vsctl set Bridge s1 other_config:stp-priority=0x10')
CLI( net )
net.stop()
if __name__ == '__main__':
    if getuid() != 0:
print "Please run this script as root / use sudo."
exit(-1)
lg.setLogLevel( 'info')
description = "Put Stanford backbone in Mininet"
parser = ArgumentParser(description=description)
parser.add_argument("-c", dest="controller_name",
default="localhost",
help="Controller's hostname or IP")
parser.add_argument("-p", dest="controller_port",type=int,
default=6633,
help="Controller's port")
parser.add_argument("-c2", dest="dummy_controller_name",
default="localhost",
help="Dummy controller's hostname or IP")
parser.add_argument("-p2", dest="dummy_controller_port",type=int,
default=6633,
help="Dummy ontroller's port")
args = parser.parse_args()
print description
print "Starting with primary controller %s:%d" % (args.controller_name, args.controller_port)
print "Starting with dummy controller %s:%d" % (args.dummy_controller_name, args.dummy_controller_port)
Mininet.init()
StanfordTopoTest(gethostbyname(args.controller_name), args.controller_port, gethostbyname(args.dummy_controller_name), args.dummy_controller_port)
| 39.210526 | 150 | 0.598292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,758 | 0.336547 |
9d1338f96592532b4f49b0f4d8c0180dee99ffe0 | 1,833 | py | Python | tests/integration/test_translated_content.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
]
| 1,901 | 2015-01-02T02:49:51.000Z | 2022-03-30T23:31:35.000Z | tests/integration/test_translated_content.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
]
| 1,755 | 2015-01-01T08:17:16.000Z | 2022-03-24T18:02:22.000Z | tests/integration/test_translated_content.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
]
| 421 | 2015-01-02T18:06:37.000Z | 2022-03-28T23:18:54.000Z | """
Test a site with translated content.
Do not test titles as we remove the translation.
"""
import io
import os
import shutil
import lxml.html
import pytest
import nikola.plugins.command.init
from nikola import __main__
from .helper import cd
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
def test_translated_titles(build, output_dir, other_locale):
"""Check that translated title is picked up."""
normal_file = os.path.join(output_dir, "pages", "1", "index.html")
translated_file = os.path.join(output_dir, other_locale, "pages", "1", "index.html")
# Files should be created
assert os.path.isfile(normal_file)
assert os.path.isfile(translated_file)
# And now let's check the titles
with io.open(normal_file, "r", encoding="utf8") as inf:
doc = lxml.html.parse(inf)
assert doc.find("//title").text == "Foo | Demo Site"
with io.open(translated_file, "r", encoding="utf8") as inf:
doc = lxml.html.parse(inf)
assert doc.find("//title").text == "Bar | Demo Site"
@pytest.fixture(scope="module")
def build(target_dir, test_dir):
"""Build the site."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.create_empty_site(target_dir)
init_command.create_configuration(target_dir)
src = os.path.join(test_dir, "..", "data", "translated_titles")
for root, dirs, files in os.walk(src):
for src_name in files:
rel_dir = os.path.relpath(root, src)
dst_file = os.path.join(target_dir, rel_dir, src_name)
src_file = os.path.join(root, src_name)
shutil.copy2(src_file, dst_file)
with cd(target_dir):
__main__.main(["build"])
| 29.095238 | 88 | 0.681942 | 0 | 0 | 0 | 0 | 663 | 0.361702 | 0 | 0 | 383 | 0.208947 |
9d13de1d5fcb7bb17eb81bbe83f7d14929b0ec78 | 8,826 | py | Python | src/train.py | weiyi1991/UA_Concurrent | 11238c778c60095abf326800d6e6a13a643bf071 | [
"MIT"
]
| null | null | null | src/train.py | weiyi1991/UA_Concurrent | 11238c778c60095abf326800d6e6a13a643bf071 | [
"MIT"
]
| 1 | 2020-09-02T12:24:59.000Z | 2020-09-02T12:24:59.000Z | src/train.py | weiyi1991/UA_Concurrent | 11238c778c60095abf326800d6e6a13a643bf071 | [
"MIT"
]
| null | null | null | import argparse
import os
import torch
import torch.nn.functional as F
from model_ST import *
import data
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import sys
from predict import evaluate_MA
from tensorboardX import SummaryWriter
# print model parameter
def print_model(model):
print('=================== Print model parameters ================')
print(model.state_dict().keys())
for i, j in model.named_parameters():
print(i)
print(j)
# Training settings
parser = argparse.ArgumentParser(description='Relation network for concurrent activity detection')
parser.add_argument('--BATCH_SIZE', type=int, default=256, help='Training batch size. Default=256')
parser.add_argument('--save_every', type=int, default=5, help='Save model every save_every epochs. Default=5')
parser.add_argument('--EPOCH', type=int, default=500, help='Number of epochs to train. Default=500')
parser.add_argument('--LR', type=float, default=0.001, help='Learning Rate. Default=0.001')
parser.add_argument('--TRAIN', action='store_true', default=True, help='Train or test? ')
parser.add_argument('--DEBUG', action='store_true', default=False, help='Debug mode (load less data)? Default=False')
parser.add_argument('--clip_grad', type=float, default=5.0, help='Gradient clipping parameter. Default=5.0')
parser.add_argument('--dataPath', type=str, default='/home/yi/PycharmProjects/relation_network/data/UCLA/new273',
help='path to the data folder')
parser.add_argument('--checkpoint', type=str, help='Checkpoint folder name under ./model/')
parser.add_argument('--verbose', type=int, default=1, help='Print verbose information? Default=True')
# model parameters
parser.add_argument('--n_input', type=int, default=37, help='Input feature vector size. Default=37')
parser.add_argument('--n_hidden', type=int, default=128, help='Hidden units for LSTM baseline. Default=128')
parser.add_argument('--n_layers', type=int, default=2, help='LSTM layer number. Default=2')
parser.add_argument('--n_class', type=int, default=12, help='Class label number. Default=12')
parser.add_argument('--use_lstm', action='store_true', default=True, help='Use LSTM for relation network classifier. Default=True')
parser.add_argument('--df', type=int, default=64, help='Relation feature dimension. Default=64')
parser.add_argument('--dk', type=int, default=8, help='Key feature dim. Default=8')
parser.add_argument('--nr', type=int, default=4, help='Multihead number. Default=4')
opt = parser.parse_args()
checkpoint_dir = './model/{}/'.format(opt.checkpoint)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
orig_stdout = sys.stdout
f = open(checkpoint_dir + '/parameter.txt', 'w')
sys.stdout = f
print(opt)
f.close()
sys.stdout = orig_stdout
# data preparation
train_dataset = data.ConActDataset(opt.dataPath)
test_dataset = data.ConActDataset(opt.dataPath, train=not opt.TRAIN)
writer = SummaryWriter()
# only take a few sequences for debugging
debug_seq = 3
if opt.DEBUG:
train_data = []
for i in range(debug_seq):
input, labels = train_dataset[i]
train_data.append((input, labels))
print("%s loaded." % train_dataset.seq_list[i])
else:
print('Loading training data ----------------------')
train_data = []
train_labels = []
for i, (input, labels) in enumerate(train_dataset):
train_data.append((input, labels))
train_labels.append(labels)
print("%s loaded." % train_dataset.seq_list[i])
print('Loading testing data ----------------------')
test_data = []
for i, (input, labels) in enumerate(test_dataset):
test_data.append((input, labels))
print("%s loaded." % test_dataset.seq_list[i])
# for model_lstm
if opt.use_lstm:
rnn = RNN(opt.n_input, opt.n_hidden, opt.n_layers, opt.n_class, opt.BATCH_SIZE, opt.df, opt.dk, opt.nr).cuda() # use lstm as classifier
else:
rnn = RNN(opt.n_input, opt.n_hidden, opt.n_layers, opt.n_class, opt.use_lstm).cuda() # use fc as classifier
print(rnn.state_dict().keys())
optimizer = torch.optim.Adam(rnn.parameters(), lr=opt.LR)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5) # set up scheduler
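# ReduceLROnPlateau halves the learning rate (factor=0.5) when the summed epoch
# loss passed to scheduler.step() stops decreasing.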
# Keep track of losses for plotting
best_loss = 10000
all_losses = []
current_loss = 3
FAA = [] # false area ratio on test set
INTAP = [] # overall interval AP on test set
save_epoch = [] # list to save the model saving epoch
# train model
total_step = len(train_data)
for epoch in range(opt.EPOCH):
all_losses.append(current_loss)
current_loss = 0
for i, (input, labels) in enumerate(train_data):
optimizer.zero_grad()
feats = torch.from_numpy(input).float()
nframes, _ = input.shape
feats = feats.reshape(-1, nframes, 273).cuda()
#feats = feats.reshape(-1, nframes, opt.n_input*6).cuda()
# change label 0 to -1
        labels[labels < 1] = -1
labels = torch.from_numpy(labels)
labels = labels.float().cuda()
# Forward pass
outputs = rnn(feats)
outputs = torch.squeeze(outputs)
loss = F.mse_loss(outputs, labels)
# print model parameter if loss is NaN
if opt.verbose > 0:
if torch.isnan(loss):
print_model(rnn)
print('Epoch {}, step {}'.format(epoch+1, i+1))
raw_input("Press Enter to continue ...")
# Backward and optimize
loss.backward()
# This line is used to prevent the vanishing / exploding gradient problem
torch.nn.utils.clip_grad_norm_(rnn.parameters(), opt.clip_grad)
optimizer.step()
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch + 1, opt.EPOCH, i + 1, total_step, loss.item()))
current_loss = current_loss + loss.item()
writer.add_scalar('loss/loss', current_loss, epoch)
scheduler.step(current_loss) # update lr if needed
# save model parameters and loss figure
if ((epoch+1) % opt.save_every) == 0:
# compute false area on test set
if not opt.DEBUG:
false_area, overall_IAPlist = evaluate_MA(rnn, test_data)
FAA.append(torch.sum(false_area).item())
INTAP.append(overall_IAPlist[-2]) # get the interval AP at threshold 0.8
save_epoch.append(epoch+1)
if FAA[-1] == min(FAA):
# if has the minimum test error, save model
checkpoint_dir = './model/{}/'.format(opt.checkpoint)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if epoch > 100:
model_str = checkpoint_dir + 'net-best.pth'
torch.save(rnn, model_str)
checkpoint_dir = './model/{}/'.format(opt.checkpoint)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if opt.verbose == 2:
print('Making dir: {}'.format(checkpoint_dir))
model_str = checkpoint_dir + 'net-{}'.format(str(epoch+1))
if opt.verbose > 0:
print('Model saved to: {}.pth'.format(model_str))
if epoch >= 100:
torch.save(rnn, model_str+'.pth')
# save interval AP
np.savetxt(model_str + 'AP.csv', np.asarray(overall_IAPlist), fmt='%0.5f')
# save miss detection
np.savetxt(model_str + 'MD.txt', np.asarray(FAA), fmt='%0.5f')
# draw miss detection v.s. epoch figure
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.plot(range(epoch+1), all_losses, color=color)
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss', color=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('Miss detection area ratio', color=color)
ax2.plot(save_epoch, FAA, 'bd')
fig.savefig(model_str+'.png')
plt.close()
# draw intervalAP v.s. epoch figure
fig1, ax3 = plt.subplots()
color = 'tab:red'
ax3.plot(range(epoch+1), all_losses, color=color)
ax3.set_xlabel('Epochs')
ax3.set_ylabel('Loss', color=color)
ax4 = ax3.twinx()
color = 'tab:blue'
ax4.set_ylabel('Overall interval AP', color=color)
ax4.plot(save_epoch, INTAP, 'bd')
fig1.savefig(model_str+'_AP.png')
plt.close()
# print the loss on training set and evaluation metrics on test set to file
orig_stdout = sys.stdout
f = open(checkpoint_dir + '/loss.txt', 'w')
sys.stdout = f
print('Loss over epochs:')
print(all_losses)
if not opt.DEBUG:
print('Miss detection area ratio:')
print(FAA)
f.close()
sys.stdout = orig_stdout
| 41.051163 | 140 | 0.643327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,575 | 0.291752 |
9d192ebb1226024bcb7fe7faa5cd19ef549419f8 | 130 | py | Python | illud/exceptions/quit_exception.py | AustinScola/illud | a6aca1de38bbe9d5a795aaa084bcbd6731767d18 | [
"MIT"
]
| 1 | 2020-12-05T00:59:15.000Z | 2020-12-05T00:59:15.000Z | illud/exceptions/quit_exception.py | AustinScola/illud | a6aca1de38bbe9d5a795aaa084bcbd6731767d18 | [
"MIT"
]
| 112 | 2021-01-15T21:42:27.000Z | 2021-04-17T19:11:21.000Z | illud/exceptions/quit_exception.py | AustinScola/illud | a6aca1de38bbe9d5a795aaa084bcbd6731767d18 | [
"MIT"
]
| null | null | null | """Raised to quit."""
from illud.exception import IlludException
class QuitException(IlludException):
"""Raised to quit."""
| 18.571429 | 42 | 0.723077 | 62 | 0.476923 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.323077 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.