max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
Scripts/simulation/apartments/situations/neighbor_react_to_you_situation.py | velocist/TS4CheatsInfo | 0 | 12792151 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\apartments\situations\neighbor_react_to_you_situation.py
# Compiled at: 2018-07-22 23:17:14
# Size of source mod 2**32: 9737 bytes
import random
from event_testing.resolver import DoubleSimResolver
from interactions.utils.loot import LootActions
from relationships.global_relationship_tuning import RelationshipGlobalTuning
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import TunableList
from situations.bouncer.bouncer_types import BouncerExclusivityCategory, RequestSpawningOption, BouncerRequestPriority
from situations.situation import Situation
from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, CommonSituationState, SituationStateData, CommonInteractionCompletedSituationState, SituationState
from situations.situation_guest_list import SituationGuestInfo, SituationGuestList
from situations.situation_types import SituationCreationUIOption
from ui.ui_dialog_notification import TunableUiDialogNotificationSnippet
import services
class _StartSituationState(SituationState):
def _on_set_sim_role_state(self, sim, *args, **kwargs):
super()._on_set_sim_role_state(sim, *args, **kwargs)
relationship_tracker = sim.sim_info.relationship_tracker
for sim_info in services.active_household():
if relationship_tracker.has_bit(sim_info.sim_id, RelationshipGlobalTuning.NEIGHBOR_GIVEN_KEY_RELATIONSHIP_BIT):
self._change_state(self.owner._hangout_state())
return
self._change_state(self.owner._knock_on_door_state())
class _KnockOnDoorState(CommonInteractionCompletedSituationState):
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner._wait_to_be_greeted())
class _NeighborWaitToBeGreetedState(CommonInteractionCompletedSituationState):
FACTORY_TUNABLES = {'early_exit_loot':TunableList(description='\n A list of loot to apply between the neighbor and the active\n household Sims if this situation state times out.\n ',
tunable=LootActions.TunableReference(description='\n A loot action applied to all of the active household Sims if this\n situation state times out.\n ')),
'early_exit_notification':TunableUiDialogNotificationSnippet(description='\n Notification that will be shown when this situation state times\n out.\n ')}
def __init__(self, *args, early_exit_loot=tuple(), early_exit_notification=None, **kwargs):
super().__init__(*args, **kwargs)
self._early_exit_loot = early_exit_loot
self._early_exit_notification = early_exit_notification
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner._hangout_state())
def timer_expired(self):
for sim_info in services.active_household():
resolver = DoubleSimResolver(sim_info, self.owner._neighbor_sim.sim_info)
for loot_action in self._early_exit_loot:
loot_action.apply_to_resolver(resolver)
resolver = DoubleSimResolver(services.active_sim_info(), self.owner._neighbor_sim.sim_info)
early_exit_notification = self._early_exit_notification(services.active_sim_info(), resolver=resolver)
early_exit_notification.show_dialog()
self.owner._self_destruct()
class _NeighborHangoutState(CommonSituationState):
def timer_expired(self):
self.owner._self_destruct()
class NeighborReactToYouSituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'_knock_on_door_state':_KnockOnDoorState.TunableFactory(description='\n Situation State for the Sim to knock on the door.\n ',
locked_args={'time_out':None,
'allow_join_situation':True}),
'_wait_to_be_greeted':_NeighborWaitToBeGreetedState.TunableFactory(description='\n Situation State for the Sim to wait to be greeted.\n ',
locked_args={'allow_join_situation': True}),
'_hangout_state':_NeighborHangoutState.TunableFactory(description='\n Situation state for the Sim to hang out for a while.\n ',
locked_args={'allow_join_situation': True}),
'_starting_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\n Job and Role State for the neighbor.\n ')}
REMOVE_INSTANCE_TUNABLES = ('_buff', 'targeted_situation', '_resident_job', '_relationship_between_job_members',
'audio_sting_on_start', 'force_invite_only', 'screen_slam_gold',
'screen_slam_silver', 'screen_slam_bronze', 'screen_slam_no_medal') + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_USER_FACING_REMOVE_INSTANCE_TUNABLES
@classmethod
def _states(cls):
return (SituationStateData(1, _StartSituationState),
SituationStateData(2, _KnockOnDoorState, factory=(cls._knock_on_door_state)),
SituationStateData(3, _NeighborWaitToBeGreetedState, factory=(cls._wait_to_be_greeted)),
SituationStateData(4, _NeighborHangoutState, factory=(cls._hangout_state)))
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls._starting_neighbor_job_and_role_state.job, cls._starting_neighbor_job_and_role_state.role_state)]
@classmethod
def default_job(cls):
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._neighbor_sim = None
def _on_set_sim_job(self, sim, job_type):
super()._on_set_sim_job(sim, job_type)
self._neighbor_sim = sim
@classmethod
def get_predefined_guest_list(cls):
active_sim_info = services.active_sim_info()
neighbor_sim_id = cls._get_neighbor()
if neighbor_sim_id is None:
return
guest_list = SituationGuestList(invite_only=True, host_sim_id=neighbor_sim_id,
filter_requesting_sim_id=(active_sim_info.sim_id))
guest_list.add_guest_info(SituationGuestInfo(neighbor_sim_id, (cls._starting_neighbor_job_and_role_state.job),
(RequestSpawningOption.DONT_CARE),
(BouncerRequestPriority.BACKGROUND_MEDIUM),
expectation_preference=True))
return guest_list
@classmethod
def _get_neighbor(cls):
active_sim_info = services.active_sim_info()
neighbors = services.sim_filter_service().submit_filter((cls._starting_neighbor_job_and_role_state.job.filter), callback=None,
requesting_sim_info=active_sim_info,
allow_yielding=False,
blacklist_sim_ids={sim_info.sim_id for sim_info in services.active_household()},
gsi_source_fn=(cls.get_sim_filter_gsi_name))
if not neighbors:
return
neighbor_sim_infos_at_home = [result.sim_info for result in neighbors if result.sim_info.is_at_home]
neighbor_sim_id = random.choice(neighbor_sim_infos_at_home).sim_id if neighbor_sim_infos_at_home else None
return neighbor_sim_id
def start_situation(self):
super().start_situation()
self._change_state(_StartSituationState())
lock_instance_tunables(NeighborReactToYouSituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE),
duration=0,
_implies_greeted_status=False) | 1.492188 | 1 |
multiclass/train.py | kesamet/examples | 1 | 12792152 | <reponame>kesamet/examples<gh_stars>1-10
import joblib
from typing import Tuple
import bdrk
import numpy as np
import pandas as pd
from bdrk.model_analyzer import ModelAnalyzer, ModelTypes
from boxkite.monitoring.collector import (
BaselineMetricCollector,
FeatureHistogramCollector,
InferenceHistogramCollector
)
from boxkite.monitoring.encoder import MetricEncoder
from environs import Env
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
env = Env()
OUTPUT_MODEL_PATH = env("OUTPUT_MODEL_PATH")
TRAIN_DATA_PATH = env("TRAIN_DATA_PATH")
TEST_DATA_PATH = env("TEST_DATA_PATH")
C = env.float("C")
CONFIG_FAI = {
"large_rings": {
"privileged_attribute_values": [1],
# privileged group name corresponding to values=[1]
"privileged_group_name": "Large",
"unprivileged_attribute_values": [0],
# unprivileged group name corresponding to values=[0]
"unprivileged_group_name": "Small",
}
}
def load_dataset(filepath: str,
target: str) -> Tuple[pd.core.frame.DataFrame,
np.ndarray]:
"""
Loads the dataset and returns the features as a pandas dataframe and
the target variable as a numpy array.
:param filepath: Path to load the data
:type filepath: str
:param target: Target variable
:type target: str
:return: The features pandas dataframe and the target numpy array
:rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray]
"""
df = pd.read_csv(filepath)
df['large_rings'] = (df['Rings'] > 10).astype(int)
# Ensure nothing missing
original_len = len(df)
df.dropna(how="any", axis=0, inplace=True)
num_rows_dropped = original_len - len(df)
if num_rows_dropped > 0:
print(f"Warning - dropped {num_rows_dropped} rows with NA data.")
y = df[target].values
df.drop(target, axis=1, inplace=True)
return df, y
def train_log_reg_model(X: pd.core.frame.DataFrame,
y: np.ndarray,
seed: float = 0,
C: float = 1,
verbose: bool = False) -> Pipeline:
"""
Scales the features and trains a logistic regression model.
:param X: Features for training
:type X: pandas.core.frame.DataFrame
:param y: Target variable
:type y: numpy.ndarray
:param seed: `random_state` for logistic regression model
:type seed: float
:param C: Inverse of regularization strength
:type C: float
:param verbose: Whether to print additional info
:type verbose: bool
:return: Pipeline of transforms with a trained final estimator
:rtype: sklearn.pipeline.Pipeline
"""
verbose and print('\nTRAIN\nScaling...')
scaling = StandardScaler()
X = scaling.fit_transform(X)
verbose and print('Fitting...')
verbose and print('C:', C)
model = LogisticRegression(random_state=seed, C=C, max_iter=4000)
model.fit(X, y)
verbose and print('Chaining pipeline...')
pipe = Pipeline([('scaling', scaling), ('model', model)])
verbose and print('Done training.')
return pipe
def compute_log_metrics(pipe: Pipeline,
x_test: pd.core.frame.DataFrame,
y_test: np.ndarray,
y_test_onehot: np.ndarray):
"""
Computes, prints and log metrics.
:param pipe: Pipeline of transforms with a trained final estimator
:type pipe: sklearn.pipeline.Pipeline
:param x_test: Features for testing
:type x_test: pandas.core.frame.DataFrame
:param y_test: Target variable data for testing
:type y_test: numpy.ndarray
:param y_test_onehot: One hot encoded target variable data
:type y_test_onehot: numpy.ndarray
:return: Test predicted probability and predictions
:rtype: tuple[numpy.ndarray, numpy.ndarray]
"""
test_prob = pipe.predict_proba(x_test)
test_pred = pipe.predict(x_test)
acc = metrics.accuracy_score(y_test, test_pred)
precision = metrics.precision_score(y_test, test_pred, average='macro')
recall = metrics.recall_score(y_test, test_pred, average='macro')
f1_score = metrics.f1_score(y_test, test_pred, average='macro')
roc_auc = metrics.roc_auc_score(y_test_onehot,
test_prob,
average='macro',
multi_class='ovr')
avg_prc = metrics.average_precision_score(y_test_onehot,
test_prob,
average='macro')
print("\nEVALUATION\n"
f"\tAccuracy = {acc:.4f}\n"
f"\tPrecision (macro) = {precision:.4f}\n"
f"\tRecall (macro) = {recall:.4f}\n"
f"\tF1 score (macro) = {f1_score:.4f}\n"
f"\tROC AUC (macro) = {roc_auc:.4f}\n"
f"\tAverage precision (macro) = {avg_prc:.4f}")
# Bedrock Logger: captures model metrics
bdrk.log_metrics(
{
"Accuracy": acc,
"Precision (macro)": precision,
"Recall (macro)": recall,
"F1 Score (macro)": f1_score,
"ROC AUC (macro)": roc_auc,
"Avg precision (macro)": avg_prc,
}
)
# `log_chart_data` assumes binary classification
# For multiclass labels, we can use a "micro-average" by
# quantifying score on all classes jointly
# See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html # noqa: E501
# This will allow us to use the same `log_chart_data` method
bdrk.log_binary_classifier_metrics(
y_test_onehot.ravel().astype(int).tolist(), # list of int
test_prob.ravel().astype(float).tolist() # list of float
)
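# For instance (illustrative numbers, not from the original script): with 3
# classes, y_test_onehot has shape (n_samples, 3), so .ravel() produces
# 3 * n_samples binary indicators whose row-major order lines up with
# test_prob.ravel(); each (indicator, probability) pair is then scored as one
# binary prediction, i.e. a micro-average over all classes.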
return test_prob, test_pred
def main():
x_train, y_train = load_dataset(
filepath=TRAIN_DATA_PATH,
target='Type'
)
x_test, y_test = load_dataset(
filepath=TEST_DATA_PATH,
target='Type'
)
print('X (train)')
print(x_train)
# sklearn `roc_auc_score` and `average_precision_score` expect
# binary label indicators with shape (n_samples, n_classes)
enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1))
y_test_onehot = enc.transform(y_test.reshape(-1, 1))  # reuse the encoder fitted on y_train; do not refit on test data
print('\nCATEGORIES')
for value, category in enumerate(enc.categories_[0]):
print(f'{category} : {value}')
# Convert target variable to numeric values
# ModelMonitoringService.export_text expects both features
# and inference to be numeric values
y_train = np.argmax(y_train_onehot, axis=1)
y_test = np.argmax(y_test_onehot, axis=1)
pipe = train_log_reg_model(x_train,
y_train,
seed=0,
C=C,
verbose=True)
# Save trained model
feature_names = x_train.columns.tolist()
print("\nSAMPLE FEATURES")
print({
feature_name: str(x_train[feature_name][0])
for feature_name in feature_names
})
joblib.dump([feature_names, enc, pipe], OUTPUT_MODEL_PATH)
print('\nSaved trained one hot encoder and logistic regression model.')
test_prob, test_pred = compute_log_metrics(pipe,
x_test,
y_test,
y_test_onehot)
# Save feature and inference distribution
train_predicted = pipe.predict(x_train).flatten().tolist()
collectors = [
FeatureHistogramCollector(
data=x_train.iteritems(),
discrete={7, 8}, # Specify which column indices are discrete
),
InferenceHistogramCollector(data=train_predicted,
is_discrete=True)
# Specify inference as discrete
]
encoder = MetricEncoder(collectors=collectors)
with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, "wb") as f:
f.write(encoder.as_text())
print('Saved feature and inference distribution.')
# Train Shap model and calculate xafai metrics
analyzer = (
ModelAnalyzer(pipe[1],
model_name='logistic',
model_type=ModelTypes.LINEAR)
.train_features(x_train)
.test_features(x_test)
.fairness_config(CONFIG_FAI)
.test_labels(y_test)
.test_inference(test_pred)
)
analyzer.analyze()
print('Saved Shap model and fairness results.')
if __name__ == '__main__':
bdrk.init()
with bdrk.start_run():
main()
| 2.46875 | 2 |
src/deps/cares/cares.gyp | odant/conan-jscript | 0 | 12792153 | <gh_stars>0
{
'target_defaults': {
'conditions': [
['OS!="win"', {
'defines': [
'_DARWIN_USE_64_BIT_INODE=1',
'_LARGEFILE_SOURCE',
'_FILE_OFFSET_BITS=64',
'_GNU_SOURCE'
]
}],
[ 'OS=="aix"', {
'include_dirs': [ 'config/aix' ],
'sources': [ 'config/aix/ares_config.h' ],
'defines': [
# Support for malloc(0)
'_LINUX_SOURCE_COMPAT=1',
'_ALL_SOURCE=1'],
}],
['OS=="solaris"', {
'defines': [
'__EXTENSIONS__',
'_XOPEN_SOURCE=500'
]
}]
]
},
'targets': [
{
'target_name': 'cares',
'type': '<(library)',
'include_dirs': [ 'include', 'src/lib' ],
'direct_dependent_settings': {
'include_dirs': [ 'include', 'src/lib' ]
},
'sources': [
'include/ares.h',
'include/ares_dns.h',
'include/ares_rules.h',
'include/ares_version.h',
'src/lib/ares_android.c',
'src/lib/ares_cancel.c',
'src/lib/ares__close_sockets.c',
'src/lib/ares_create_query.c',
'src/lib/ares_data.c',
'src/lib/ares_data.h',
'src/lib/ares_destroy.c',
'src/lib/ares_expand_name.c',
'src/lib/ares_expand_string.c',
'src/lib/ares_fds.c',
'src/lib/ares_free_hostent.c',
'src/lib/ares_free_string.c',
'src/lib/ares_freeaddrinfo.c',
'src/lib/ares_getenv.h',
'src/lib/ares_getaddrinfo.c',
'src/lib/ares_gethostbyaddr.c',
'src/lib/ares_gethostbyname.c',
'src/lib/ares__get_hostent.c',
'src/lib/ares_getnameinfo.c',
'src/lib/ares_getsock.c',
'src/lib/ares_init.c',
'src/lib/ares_ipv6.h',
'src/lib/ares_library_init.c',
'src/lib/ares_library_init.h',
'src/lib/ares_llist.c',
'src/lib/ares_llist.h',
'src/lib/ares_mkquery.c',
'src/lib/ares_nameser.h',
'src/lib/ares_nowarn.c',
'src/lib/ares_nowarn.h',
'src/lib/ares_options.c',
'src/lib/ares__parse_into_addrinfo.c',
'src/lib/ares_parse_aaaa_reply.c',
'src/lib/ares_parse_a_reply.c',
'src/lib/ares_parse_caa_reply.c',
'src/lib/ares_parse_mx_reply.c',
'src/lib/ares_parse_naptr_reply.c',
'src/lib/ares_parse_ns_reply.c',
'src/lib/ares_parse_ptr_reply.c',
'src/lib/ares_parse_soa_reply.c',
'src/lib/ares_parse_srv_reply.c',
'src/lib/ares_parse_txt_reply.c',
'src/lib/ares_platform.h',
'src/lib/ares_private.h',
'src/lib/ares_process.c',
'src/lib/ares_query.c',
'src/lib/ares__read_line.c',
'src/lib/ares__readaddrinfo.c',
'src/lib/ares_search.c',
'src/lib/ares_send.c',
'src/lib/ares_setup.h',
'src/lib/ares__sortaddrinfo.c',
'src/lib/ares_strcasecmp.c',
'src/lib/ares_strcasecmp.h',
'src/lib/ares_strdup.c',
'src/lib/ares_strdup.h',
'src/lib/ares_strerror.c',
'src/lib/ares_strsplit.c',
'src/lib/ares_timeout.c',
'src/lib/ares__timeval.c',
'src/lib/ares_version.c',
'src/lib/ares_writev.c',
'src/lib/ares_writev.h',
'src/lib/bitncmp.c',
'src/lib/bitncmp.h',
'src/lib/inet_net_pton.c',
'src/lib/inet_ntop.c',
'src/lib/ares_inet_net_pton.h',
'src/lib/setup_once.h',
'src/tools/ares_getopt.c',
'src/tools/ares_getopt.h',
],
'conditions': [
[ 'library=="static_library"', {
'defines': [ 'CARES_STATICLIB' ]
}, {
'defines': [ 'CARES_BUILDING_LIBRARY' ]
}],
[ 'OS=="win"', {
'defines': [
'CARES_PULL_WS2TCPIP_H=1',
'_WINSOCK_DEPRECATED_NO_WARNINGS',
],
'include_dirs': [ 'config/win32' ],
'sources': [
'src/lib/config-win32.h',
'src/lib/windows_port.c',
'src/lib/ares_getenv.c',
'src/lib/ares_iphlpapi.h',
'src/lib/ares_platform.c'
],
'libraries': [
'-lws2_32.lib',
'-liphlpapi.lib'
],
}, {
# Not Windows i.e. POSIX
'cflags': [
'-g',
'-pedantic',
'-Wall',
'-Wextra',
'-Wno-unused-parameter'
],
'defines': [ 'HAVE_CONFIG_H' ],
}],
[ 'OS not in "win android"', {
'cflags': [
'--std=gnu89'
],
}],
[ 'OS=="linux"', {
'include_dirs': [ 'config/linux' ],
'sources': [ 'config/linux/ares_config.h' ]
}],
[ 'OS=="mac"', {
'include_dirs': [ 'config/darwin' ],
'sources': [ 'config/darwin/ares_config.h' ]
}],
[ 'OS=="freebsd" or OS=="dragonflybsd"', {
'include_dirs': [ 'config/freebsd' ],
'sources': [ 'config/freebsd/ares_config.h' ]
}],
[ 'OS=="openbsd"', {
'include_dirs': [ 'config/openbsd' ],
'sources': [ 'config/openbsd/ares_config.h' ]
}],
[ 'OS=="android"', {
'include_dirs': [ 'config/android' ],
'sources': [ 'config/android/ares_config.h' ],
}],
[ 'OS=="solaris"', {
'include_dirs': [ 'config/sunos' ],
'sources': [ 'config/sunos/ares_config.h' ],
'direct_dependent_settings': {
'libraries': [
'-lsocket',
'-lnsl'
]
}
}]
]
}
]
}
| 1.195313 | 1 |
app/models/seller_review.py | AlexEpstein1/316Amazon | 0 | 12792154 | <gh_stars>0
from flask import current_app as app, flash, redirect, render_template, request, url_for
from flask_login import current_user
from sqlalchemy import exc
import datetime
# Reviews of Sellers
# CREATE TABLE SellerReview (
# user_id INT NOT NULL REFERENCES Users(id),
# seller_id INT NOT NULL REFERENCES Sellers(id),
# date_time DATE NOT NULL,
# description VARCHAR(256) NOT NULL,
# rating DECIMAL(10, 2) NOT NULL CHECK(rating >= 1 AND rating <= 5),
# PRIMARY KEY (user_id, seller_id)
# FOREIGN KEY (user_id, seller_id) REFERENCES Purchases(uid, pid)
# );
class SellerReview:
def __init__(self, **kwargs):
self.user_id = kwargs.get('user_id')
self.seller_id = kwargs.get('seller_id')
self.date_time = kwargs.get('date_time')
self.description = kwargs.get('description', '')
self.rating = kwargs.get('rating')
self.exists = kwargs.get('exists')
self.reviews = kwargs.get('reviews')
self.last_review = kwargs.get('last_review')
self.avg_rating = kwargs.get('avg_rating')
@staticmethod
def get(user_id, offset = 0, seller_id = None):
# If no passed in `user_id`, then return all reviews for that seller
if user_id is None:
rows = app.db.execute('''
SELECT user_id, seller_id, date_time, description, rating
FROM SellerReview
WHERE seller_id = :seller_id
ORDER BY date_time DESC
LIMIT 10 OFFSET :offset
''',
seller_id = seller_id,
offset = offset)
# If no passed in `seller_id`, then return all reviews from that user
elif seller_id is None:
rows = app.db.execute('''
SELECT user_id, seller_id, date_time, description, rating
FROM SellerReview
WHERE user_id = :user_id
ORDER BY date_time DESC
LIMIT 10 OFFSET :offset
''',
user_id = user_id,
offset = offset)
# If `seller_id` passed in, then return review from that user for the given seller
elif seller_id is not None:
rows = app.db.execute('''
SELECT user_id, seller_id, date_time, description, rating
FROM SellerReview
WHERE user_id = :user_id AND seller_id = :seller_id
LIMIT 10 OFFSET :offset
''',
user_id = user_id,
offset = offset,
seller_id = seller_id)
# If there exists a previous review, create the object
if rows:
reviews = [SellerReview(user_id = row[0],
seller_id = row[1],
date_time = row[2],
description = row[3],
rating = row[4],
exists = True) for row in rows]
# If no seller_id passed in, return just the first element, not the list
if seller_id is None or user_id is None:
return reviews
else:
return reviews[0]
# Otherwise, create an empty SellerReview object
else:
return(SellerReview(exists = False))
@staticmethod
def add_review(request, seller_id):
# Add in a check to see if the user has bought from this seller
rows = app.db.execute('''
SELECT order_id
FROM Purchases
WHERE buyer_id = :buyer_id AND seller_id = :seller_id AND status = 'Complete'
''',
buyer_id = current_user.id,
seller_id = seller_id)
# This means that user has not bought from this seller
if not rows:
return 'you have not had a completed purchase from this seller'
# Get information to add to review
date_time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
description = request.form['body']
rating = request.form['numstars']
try:
rows = app.db.execute("""
INSERT INTO SellerReview(user_id, seller_id, date_time, description, rating)
VALUES(:user_id, :seller_id, :date_time, :description, :rating)
RETURNING user_id
""",
user_id = current_user.id,
seller_id = seller_id,
date_time = date_time,
description = description,
rating = rating)
# This means already a review for this seller from this user
except exc.IntegrityError as e:
return 'you have already made a review for this seller'
return 'Done'
@staticmethod
def update_review(request, seller_id):
# Get information to add to review
date_time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
description = request.form['body']
rating = request.form['numstars']
rows = app.db.execute("""
UPDATE SellerReview
SET rating = :rating, description = :description, date_time = :date_time
WHERE user_id = :user_id AND seller_id = :seller_id
RETURNING user_id
""",
rating = rating,
description = description,
date_time = date_time,
user_id = current_user.id,
seller_id = seller_id)
return 'Done'
@staticmethod
def delete_review(seller_id):
rows = app.db.execute("""
DELETE FROM SellerReview
WHERE user_id = :user_id AND seller_id = :seller_id
RETURNING user_id
""",
user_id = current_user.id,
seller_id = seller_id)
# flash('Deleted product review for product ID: ' + product_id)
return 'Deleted seller review for seller ID: ' + seller_id
@staticmethod
def get_review_stats(user_id):
rows = app.db.execute('''
SELECT user_id, COUNT(*) AS reviews, MAX(date_time) AS last_review, AVG(rating) AS avg_rating
FROM SellerReview
WHERE user_id = :user_id
GROUP BY user_id
''',
user_id = user_id)
# If there exists a previous review, create the object
if rows:
return [SellerReview(user_id = row[0],
reviews = row[1],
last_review = row[2],
avg_rating = row[3],
exists = True) for row in rows][0]
# Otherwise, create an empty SellerReview object
else:
return (SellerReview(exists = False))
| 2.390625 | 2 |
web/kwmo/kwmo/controllers/file_download.py | tmbx/kas | 0 | 12792155 | import logging
import simplejson
import time
from pylons import request, response, session,config, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from kwmo.lib.base import BaseController, render
from kfs_lib import *
from kwmo.lib.file_download import kfs_download_generator
from kwmo.model.kfs_node import KfsNode
from kwmo.lib.kwmo_kcd_client import KcdClient
from kwmo.lib.config import get_cached_kcd_external_conf_object
log = logging.getLogger(__name__)
class FileDownloadController(BaseController):
requires_auth = ['download']
# Send download notifications.
send_notification = True
# Download a file.
def download(self, workspace_id):
workspace_id = int(workspace_id)
# Shortcuts
share_id = 0
user_id = session['user']['id']
kcd_conf = get_cached_kcd_external_conf_object()
# Permissions verification
if not c.perms.hasPerm('kfs.download.share.%i' % (share_id)):
log.error("File download denied: user has not the right permissions.")
# FIXME download permission error: get rid of 403 error: send an errror file?
return abort(403)
# Get kfs_file object from request.
web_kfs_file_json = request.params.get('kfs_file')
web_kfs_file_dict = simplejson.loads(web_kfs_file_json)
web_kfs_file = WebKFSFile().from_dict(web_kfs_file_dict)
assert web_kfs_file.workspace_id == workspace_id
assert web_kfs_file.share_id == share_id
# Get the kfs node associted to it.
kfs_node = KfsNode.get_by(workspace_id=workspace_id,
share_id=web_kfs_file.share_id,
inode_id=web_kfs_file.inode_id)
if c.workspace.public and not c.is_admin:
# Check that the user has rights to download from this path.
kfs_dir = kfs_node.parent
if not kfs_dir:
raise Exception("Public workspace file download: bad directory(0).")
if kfs_dir.name == "Original attachments":
kfs_dir = kfs_dir.parent
expected_dir_name = get_kfs_skurl_subject(session['email_info']['date'], session['email_info']['subject'])
if kfs_dir.name != expected_dir_name:
raise Exception("Public workspace file download: bad directory(1).")
kfs_dir_parent = kfs_dir.parent
if not kfs_dir_parent:
raise Exception("Public workspace file download: bad directory(2).")
if kfs_dir_parent.parent_inode_id != KFS_ROOT_INODE_ID:
raise Exception("Public workspace file download: bad directory(3).");
identities_emails = map(lambda x: x['email'], session['identities'])
if kfs_dir_parent.name not in identities_emails:
raise Exception("Public workspace file download: bad directory(4).");
# Get download mode.
mode = request.params.get('mode', 'save')
ctype = None
if mode == 'open':
# Guess mime type from the file name
import mimetypes
ctype, cencoding = mimetypes.guess_type(kfs_node.name, False) # not strict
#ctype, cencoding = mimetypes.guess_type(kfs_node.name, True) # strict
if not ctype:
# Download mime type
ctype, cencoding = ('application/octet-stream', None)
kfs_files = [kfs_node.to_dict()]
# Set the content type and the headers.
response.headers['Content-Type'] = ctype
#if cencoding:
# response.headers['Content-Encoding'] = cencoding
response.headers['Content-disposition'] = str('attachment; filename="%s"' % ( kfs_node.name.encode('latin1') ))
response.headers['Content-Transfer-Encoding'] = 'binary'
# Use a custom header that will be replaced in a middleware (workaround for
# the Content-Length header being dropped somewhere in pylons).
response.headers['X-Content-Length'] = str(kfs_node.file_size)
# These headers are necessary for the download to work on IE.
response.headers['Cache-Control'] = 'maxage=3600'
response.headers['Pragma'] = 'public'
if self.send_notification:
# Send download notification to KCD.
pubws_email_id = 0
if c.workspace.public:
pubws_email_id = session['email_id']
kc = KcdClient(kcd_conf)
try:
kc.send_download_notification(workspace_id, user_id, kfs_node.share_id, kfs_node.inode_id,
kfs_node.commit_id, pubws_email_id)
log.debug(("Sent download notification: workspace_id=%i, user_id=%i, share_id=%i" + \
", inode_id=%i, commit_id=%i, pubws_email_id=%i.") % \
( workspace_id, user_id, kfs_node.share_id, kfs_node.inode_id, kfs_node.commit_id,
pubws_email_id ) )
except Exception, e:
log.error("Sending download notification failed: '%s'." % ( str(e) ) )
else:
log.debug("Not sending download notification: user is admin: workspace_id=%i, user_id=%i." % \
( workspace_id, user_id ) )
return kfs_download_generator(kcd_conf, kfs_node.workspace_id, kfs_node.share_id, user_id, kfs_files)
| 2.09375 | 2 |
mopro/processing/submitter.py | fact-project/mopro3 | 1 | 12792156 | from threading import Thread, Event
import logging
import peewee
import socket
from ..database import CorsikaRun, CeresRun
from ..queries import get_pending_jobs, count_jobs, update_job_status
from .corsika import prepare_corsika_job
from .ceres import prepare_ceres_job
log = logging.getLogger(__name__)
hostname = socket.getfqdn()
class JobSubmitter(Thread):
def __init__(
self,
interval,
max_queued_jobs,
mopro_directory,
host,
port,
cluster,
location=None,
corsika_memory='4G',
ceres_memory='12G',
tmp_dir=None,
):
'''
Parameters
----------
interval: int
number of seconds to wait between submissions
max_queued_jobs: int
Maximum number of jobs in the queue of the grid engine
No new jobs are submitted if the number of jobs in the queue is
higher than this value
mopro_directory: str
path to the basic structure for erna. Logfiles, jars, xmls and
analysis output are stored in subdirectories to this directory.
host: str
hostname of the submitter node
port: int
port for the zmq communication
'''
super().__init__()
self.event = Event()
self.interval = interval
self.max_queued_jobs = max_queued_jobs
self.mopro_directory = mopro_directory
self.host = host
self.port = port
self.cluster = cluster
self.location = location or hostname
self.ceres_memory = ceres_memory
self.corsika_memory = corsika_memory
self.tmp_dir = tmp_dir
def run(self):
while not self.event.is_set():
try:
self.process_pending_jobs()
except peewee.OperationalError:
log.exception('Lost database connection')
except Exception as e:
log.exception('Error during submission: {}'.format(e))
self.event.wait(self.interval)
def terminate(self):
self.event.set()
def process_pending_jobs(self):
'''
Fetches pending runs from the processing database
and submits them using qsub if not too many jobs are running already.
'''
pending_corsika = count_jobs(CorsikaRun, status='created')
pending_ceres = count_jobs(CeresRun, status='created')
n_queued = self.cluster.n_queued
log.debug(f'{self.cluster.n_running} jobs running')
log.debug(f'{n_queued} jobs queued')
log.debug(f'{pending_corsika} pending CORSIKA jobs in database')
log.debug(f'{pending_ceres} pending CERES jobs in database')
new_jobs = self.max_queued_jobs - n_queued
if new_jobs > 0:
pending_jobs = get_pending_jobs(max_jobs=new_jobs, location=self.location)
for job in pending_jobs:
if self.event.is_set():
break
kwargs = {
'mopro_directory': self.mopro_directory,
'submitter_host': self.host,
'submitter_port': self.port,
'tmp_dir': self.tmp_dir
}
try:
if isinstance(job, CorsikaRun):
self.cluster.submit_job(
**prepare_corsika_job(job, **kwargs),
memory=self.corsika_memory
)
log.info(f'Submitted new CORSIKA job with id {job.id}')
elif isinstance(job, CeresRun):
self.cluster.submit_job(
**prepare_ceres_job(job, **kwargs),
memory=self.ceres_memory
)
log.info(f'Submitted new CERES job with id {job.id}')
else:
raise ValueError(f'Unknown job type: {job}')
update_job_status(type(job), job.id, 'queued', location=self.location)
except Exception:
log.exception('Could not submit job')
update_job_status(type(job), job.id, 'failed')
| 2.109375 | 2 |
ensemble.py | statho/pytorch-cifar | 0 | 12792157 | <filename>ensemble.py<gh_stars>0
from tqdm import tqdm
import os, pdb, logging, argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torchvision.transforms as transforms
from models import *
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='', type=str, help='name of experiment')
parser.add_argument('--model', default='resnet18', type=str, help='model to train')
parser.add_argument('--use_augm', action='store_true', help='use data augmentations?')
parser.add_argument('--lr', default=1e-2, type=float, help='learning rate')
parser.add_argument('--iter', default=100, type=int, help='number of total iterations to run')
args = parser.parse_args()
# use this to init weight as required
torch.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
model_dict = {
'resnet18' : ResNet18(), # 11m params
'resnet34' : ResNet34(), # 21m
'resnet50' : ResNet50(), # 23m
'resnet101': ResNet101(), # 42m
'resnet152': ResNet152(), # 58m
'vgg19' : VGG('VGG19'),
}
chpt_vanilla_dict = {
'resnet18': ['R18_1', 'R18_2', 'R18_3'],
'resnet50': ['R50_1', 'R50_2', 'R50_3'],
}
chpt_augm_dict = {
'resnet18': ['R18_augm1', 'R18_augm2', 'R18_augm3'],
'resnet50': ['R50_augm1', 'R50_augm2', 'R50_augm3'],
}
chpt_name = 'net_80.pth'
def test(model, testloader, criterion, best_acc):
net.eval()
test_loss = 0
total_samples, correct_samples = 0, 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs = inputs.to(device) # [bs, 3, 32, 32]
targets = targets.to(device) # [bs]
outputs = model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total_samples += targets.size(0)
correct_samples += predicted.eq(targets).sum().item()
# save loss over the whole test set at TB
test_acc = 100. * correct_samples / total_samples
total_test_loss = test_loss / (batch_idx+1)
return total_test_loss, test_acc
def main():
checkpoint_dir = 'results_ensembles/{}'.format(args.name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
## Logging
logging.basicConfig(filename=os.path.join(checkpoint_dir, 'optim.log'), level=logging.INFO)
console = logging.StreamHandler()
logging.getLogger().addHandler(console)
console.setLevel(logging.INFO)
logging.info(args)
writer_test = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'optim'))
## Data
print('=> Preparing data..')
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset,
# batch_size = len(testset),
batch_size = 1000,
shuffle = False,
# sampler = data_sampler(testset, shuffle=False),
num_workers = 4,
)
loader = iter(testloader)
## Create model
model_names = chpt_augm_dict[args.model] if args.use_augm else chpt_vanilla_dict[args.model]
num_models = len(model_names)
print('=> Using {} models {}'.format(num_models, args.model))
model = model_dict[args.model]
model = model.to(device)
model.train()
## Init alphas and optimizer
alphas = torch.rand(num_models) / num_models
alphas[-1] = 1 - sum(alphas[:-1])
assert sum(alphas).item() == 1, 'Alphas should sum to 1'
num_iters = args.iter
criterion = nn.CrossEntropyLoss()
alphas = torch.nn.Parameter(alphas)
optimizer = optim.Adam([alphas], lr=args.lr, betas=(0.9, 0.999))
### Main optimization loop
## TODO: alphas are not updating -- solve this issue
for it in range(num_iters):
# load model weights and multiply with alphas
for ii, model_name in enumerate(model_names):
model_path = os.path.join('results', model_name, chpt_name)
checkpoint = torch.load(model_path)['net']
if ii==0:
model.load_state_dict(checkpoint)
for name, model_params in model.named_parameters():
if not name.endswith('num_batches_tracked'):
model_params = torch.nn.Parameter( model_params * alphas[ii] )
else:
for name, model_params in model.named_parameters():
if not name.endswith('num_batches_tracked'):
model_params = torch.nn.Parameter( model_params + alphas[ii] * checkpoint[name] )
batch = next(loader)
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
print(alphas)
# print(list(model.parameters())[0][0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(alphas)
# print(list(model.parameters())[0][0])
pdb.set_trace()
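# One possible direction for the TODO above (a sketch, assuming PyTorch >= 2.0
# so that torch.func.functional_call is available; `checkpoints` is a
# hypothetical list holding the loaded state_dicts): blend the checkpoint
# tensors as differentiable functions of `alphas` and run the forward pass
# functionally, so that gradients flow back into `alphas`.
# from torch.func import functional_call
# blended = {}
# for name, tensor in checkpoints[0].items():
#     if name.endswith('num_batches_tracked'):
#         blended[name] = tensor
#     else:
#         blended[name] = sum(a * ckpt[name] for a, ckpt in zip(alphas, checkpoints))
# outputs = functional_call(model, blended, (inputs,))
# loss = criterion(outputs, targets)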
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
# for idx in pbar:
# i = idx + args.start_iter
# if i > args.iter:
# print("TRAINING IS DONE!")
# break
# train_loss = 0
# total_samples, correct_samples = 0, 0
# batch = next(loader)
# inputs, targets = batch
# inputs = inputs.to(device) # [bs, 3, 32, 32]
# targets = targets.to(device) # [bs]
# outputs = net(inputs)
# loss = criterion(outputs, targets)
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# scheduler.step()
# train_loss += loss.item()
# _, predicted = outputs.max(1)
# total_samples += targets.size(0)
# correct_samples += predicted.eq(targets).sum().item()
# train_acc = correct_samples / total_samples
# # log metrics to TB every 100 iterations
# if i % 100 == 0:
# writer_train.add_scalar('train_acc', train_acc, i)
# writer_train.add_scalar('train_loss', train_loss, i)
# # test model and save it every 5000 iterations
# if i % args.iter_save == 0:
# test_loss, test_acc = test(net, testloader, criterion, best_acc)
# logging.info('=> Saving model with Loss: {:.5f} -- ACC: {} at iteration {} K'.format(test_loss, test_acc, int( i / 1000)) )
# state = {
# 'net' : net.state_dict(),
# 'acc' : test_acc,
# 'iter' : i,
# }
# torch.save(state, os.path.join(checkpoint_dir, 'net_{}.pth'.format( int( i / 1000) )))
# net.train()
# writer_test.add_scalar('test_acc', test_acc, i)
# writer_test.add_scalar('test_loss', test_loss, i)
# if test_acc > best_acc:
# logging.info('=> Accuracy improved from {} --> {} at iteration {} K'.format(best_acc, test_acc, int(i / 1000) ))
# state = {
# 'net' : net.state_dict(),
# 'acc' : test_acc,
# 'iter' : i,
# }
# torch.save(state, os.path.join(checkpoint_dir, 'net_best.pth'))
# best_acc = test_acc
if __name__ == '__main__':
main()
| 2.0625 | 2 |
web/app/views.py | sanjifr3/Narrator | 11 | 12792158 | """
Backend of Narrator web app.
"""
import os
import sys
import shutil
import pandas as pd
import skimage.io as io
import PIL
from flask import render_template, request, redirect, url_for, send_from_directory, session
from app import app
sys.path.append(app.config['COCOAPI_PATH'] + 'PythonAPI')
from pycocotools.coco import COCO
sys.path.append('../src/')
from Narrator import Narrator
# Construct classes
narrator = Narrator(
root_path=app.config['ROOT_PATH'],
coco_vocab_path=app.config['COCO_VOCAB_PATH'],
msrvtt_vocab_path=app.config['MSRVTT_VOCAB_PATH'],
base_model=app.config['ENCODER_MODEL'],
ic_model_path=app.config['IC_MODEL_PATH'],
vc_model_path=app.config['VC_MODEL_PATH'],
ic_rnn_type=app.config['IC_RNN_TYPE'],
vc_rnn_type=app.config['VC_RNN_TYPE']
)
# Load samples from file
try:
samplesDF = pd.read_csv(
app.config['SAMPLES_DIR'] + 'sample_captions.csv', index_col=0)
except Exception:
samplesDF = pd.DataFrame(
columns=['id', 'caption', 'gt'], index=['name']).head()
# Update any existing samples
if len(app.config['SAMPLES_TO_UPDATE']) > 0:
# Load image and video datasets
coco = COCO(app.config[
'COCOAPI_PATH'] + 'annotations/instances_{}.json'.format(app.config['COCO_SET']))
cocoCaptionDF = pd.read_csv(
app.config['COCOAPI_PATH'] + 'annotations/coco_captions.csv')
msrvttCaptionDF = pd.read_csv(app.config['MSRVTT_CAPTION_PATH'])
# Determine images and videos to update
im_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'image' in x]
vid_names = [x for x in app.config['SAMPLES_TO_UPDATE'] if 'video' in x]
# Randomly select ids from their respective datasets and reject any that already have been
# chosen
rand_im_ids = cocoCaptionDF[cocoCaptionDF['set'] == app.config[
'COCO_SET']].sample(n=32)['id'].values.tolist()
rand_im_ids = [x for x in rand_im_ids if x not in samplesDF['id'].values.tolist()][
:len(im_names)]
rand_vid_ids = msrvttCaptionDF[msrvttCaptionDF['set'] == 'test'].sample(n=32)[
'vid_id'].values.tolist()
rand_vid_ids = [x for x in rand_vid_ids if x not in samplesDF['id'].values.tolist()][
:len(vid_names)]
# Generate sample information and store to file
for i, (name, im_id) in enumerate(zip(im_names, rand_im_ids)):
# Get image and generated caption
url = coco.loadImgs(im_id)[0]['coco_url']
caption = narrator.gen_caption(url, beam_size=8)
# Get all gt captions and encode/decode using vocabulary
gts = cocoCaptionDF[cocoCaptionDF['id'] == im_id]['caption']
gts = gts.apply(lambda x: narrator.coco_vocab.encode(
x, app.config['MAX_LEN'] + 1))
gts = gts.apply(lambda x: narrator.coco_vocab.decode(x, clean=True))
# Find nearest gt
nearest_gt = ''
best_score = 0.0
for gt in gts:
bleu = narrator.coco_vocab.evaluate([gt], caption)
if bleu > best_score:
best_score = bleu
nearest_gt = gt
gt = ' '.join(nearest_gt).capitalize()
caption = ' '.join(caption).capitalize()
# Load and save imge
im = PIL.Image.fromarray(io.imread(url)).convert('RGB')
im.save(app.config['SAMPLES_DIR'] + name + '.jpg')
# Generate audio files
narrator.gen_audio_file(
gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg')
narrator.gen_audio_file(
caption, app.config['SAMPLES_DIR'] + name + '.ogg')
# Update samples dataframe
samplesDF.loc[name, 'id'] = im_id
samplesDF.loc[name, 'caption'] = caption
samplesDF.loc[name, 'gt'] = gt
print('Images updated!')
for i, (name, vid_id) in enumerate(zip(vid_names, rand_vid_ids)):
# Get video and generated caption
url = app.config['MSRVTT_DATA_PATH'] + vid_id + '.mp4'
caption = narrator.gen_caption(url, beam_size=8)
# Get all gt captions and encode/decode using vocabulary
gts = msrvttCaptionDF[msrvttCaptionDF['vid_id'] == vid_id]['caption']
gts = gts.apply(lambda x: narrator.msrvtt_vocab.encode(
x, app.config['MAX_LEN'] + 1))
gts = gts.apply(lambda x: narrator.msrvtt_vocab.decode(x, clean=True))
# Find the nearest gt
nearest_gt = ''
best_score = 0.0
for gt in gts:
bleu = narrator.msrvtt_vocab.evaluate([gt], caption)
if bleu > best_score:
best_score = bleu
nearest_gt = gt
gt = ' '.join(nearest_gt).capitalize()
caption = ' '.join(caption).capitalize()
# Copy image to samples directory
shutil.copy(url, app.config['SAMPLES_DIR'] + name + '.mp4')
# Generate audio files
narrator.gen_audio_file(
gt, app.config['SAMPLES_DIR'] + name + '_gt.ogg')
narrator.gen_audio_file(
caption, app.config['SAMPLES_DIR'] + name + '.ogg')
# update samples dataframe
samplesDF.loc[name, 'id'] = vid_id
samplesDF.loc[name, 'caption'] = caption
samplesDF.loc[name, 'gt'] = gt
print('Videos updated!')
# Save samples dataframe
samplesDF.to_csv(app.config['SAMPLES_DIR'] + 'sample_captions.csv')
# Load samples
IM_SAMPLES_DICT = [[], [], [], []]
VID_SAMPLES_DICT = [[], [], [], []]
for i, ix in enumerate(range(16)):
im_sample = samplesDF.loc['image' + str(ix)]
vid_sample = samplesDF.loc['video' + str(ix)]
IM_SAMPLES_DICT[int(i / 4)].append({
'id': im_sample['id'],
'url': 'image' + str(ix) + '.jpg',
'gt': im_sample['gt'],
'gt_audio': 'image' + str(ix) + '_gt.ogg',
'caption': im_sample['caption'],
'cap_audio': 'image' + str(ix) + '.ogg'
})
VID_SAMPLES_DICT[int(i / 4)].append({
'id': vid_sample['id'],
'url': 'video' + str(ix) + '.mp4',
'gt': vid_sample['gt'],
'gt_audio': 'video' + str(ix) + '_gt.ogg',
'caption': vid_sample['caption'],
'cap_audio': 'video' + str(ix) + '.ogg'
})
print("Samples loaded")
# Get filepath for scene example
scene_example_file = app.config[
'SAMPLES_DIR'] + app.config['SCENE_EXAMPLE_FILE']
# Create scene example if it doesn't already exist
if not os.path.exists(scene_example_file + '.csv'):
# Generate captions by scene
captions, scene_change_timecodes = narrator.gen_caption(
scene_example_file + '.mp4', by_scene=True, as_string=True)
# Create dataframe
sceneSamplesDF = pd.DataFrame({
'time': scene_change_timecodes,
'caption': captions
})
# Capitalize
sceneSamplesDF['caption'] = sceneSamplesDF[
'caption'].apply(lambda x: x.capitalize())
# Generate audio files for each caption
for i, caption in enumerate(captions):
narrator.gen_audio_file(
caption, scene_example_file + '.' + str(i) + '.ogg')
# Save samples dataframe
sceneSamplesDF.to_csv(scene_example_file + '.csv', index=False)
# Load samples dataframe
else:
sceneSamplesDF = pd.read_csv(scene_example_file + '.csv')
# Load scene example
SCENE_SAMPLES_DICT = []
for i, row in sceneSamplesDF.iterrows():
SCENE_SAMPLES_DICT.append({
'time': row['time'],
'cap_audio': app.config['SCENE_EXAMPLE_FILE'] + '.' + str(i) + '.ogg',
'caption': row['caption'].capitalize()
})
##############################################################################
##################################### APP ####################################
##############################################################################
def allowed_file(filename):
"""Determine if a file has an allowed extension."""
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
def save_file(file):
"""Save given file and return path."""
file_path = os.path.join(app.config['UPLOAD_DIR'], file.filename)
file.save(os.path.join(app.config['UPLOAD_DIR'], file.filename))
return file_path
def split_filename(file):
"""Split filename into name and ext."""
*filename, ext = file.filename.split('.')
if isinstance(filename, list):
filename = '_'.join(filename) # Replace existing . with _
return filename, ext
def determine_type(ext, by_scene):
"""Determine if image or video."""
if ext in app.config['VID_EXTENSIONS']:
if by_scene:
return 'scene'
return 'video'
return 'image'
def generate_caption(file, by_scene):
"""Generate caption for given file"""
file.filename = file.filename.replace(' ', '_')
@app.route('/')
@app.route('/index')
def index():
"""Render homepage."""
return render_template('main.html', page='main', title=app.config['TITLE'])
@app.route('/images')
def images():
"""Render image examples page."""
return render_template('images.html', im_dict=IM_SAMPLES_DICT, page='images',
title=app.config['TITLE'])
@app.route('/videos')
def videos():
"""Render video examples page."""
return render_template('videos.html', vid_dict=VID_SAMPLES_DICT, page='videos',
title=app.config['TITLE'])
@app.route('/scenes')
def scenes():
"""Render scene examples page."""
return render_template('scenes.html', page='scenes', scenes_dict=SCENE_SAMPLES_DICT,
title=app.config['TITLE'])
@app.route('/contact')
def contact():
"""Render contact me page."""
return render_template('contact.html', page='contact', title=app.config['TITLE'])
@app.route('/about')
def about():
"""Render about page."""
return render_template('about.html', page='about', title=app.config['TITLE'])
@app.route('/demo', methods=['GET', 'POST'])
def demo():
"""Render demo page."""
# Check if file is uploaded
if request.method == 'POST':
try:
# Grab file, and if by_scene is requested from website
file = request.files['file']
by_scene = 'by_scene' in request.form
# Check if filetype is allowed
if file and allowed_file(file.filename):
# Fix filename, save to file, get ext and determine type
file.filename = file.filename.replace(' ', '_')
file_path = save_file(file)
filename, ext = split_filename(file)
typ = determine_type(ext, by_scene)
if typ == 'image':
by_scene = False
# Generate caption/audio and redirect to demo_output page
if not by_scene:
caption = narrator.gen_caption(file_path,
beam_size=app.config['BEAM_SIZE'],
as_string=True,
by_scene=by_scene).capitalize()
cap_audio = filename + '.ogg'
narrator.gen_audio_file(
caption, app.config['UPLOAD_DIR'] + cap_audio)
return redirect(url_for('uploaded_file',
filename=file.filename,
cap_audio=cap_audio,
caption=caption,
typ=typ))
# Generate caption/audio by scene and redirect to demo_output
# page
captions, time_codes = narrator.gen_caption(file_path,
beam_size=app.config[
'BEAM_SIZE'],
as_string=True,
by_scene=by_scene)
scenes_dict = []
for i, caption in enumerate(captions):
narrator.gen_audio_file(caption,
app.config['UPLOAD_DIR'] +
filename + '.' + str(i) + '.ogg')
scenes_dict.append({
'time': time_codes[i],
'cap_audio': filename + '.' + str(i) + '.ogg',
'caption': caption.capitalize()
})
session['scenes_dict'] = scenes_dict
return redirect(url_for('uploaded_file',
filename=file.filename,
typ='scene',
caption='scene',
cap_audio='scene'))
except KeyError as e:
print(e)
return render_template('demo.html', page='demo', title=app.config['TITLE'])
@app.route('/demo/<filename>&<cap_audio>&<typ>&<caption>', methods=['GET', 'POST'])
def uploaded_file(filename, typ='image', caption="", cap_audio=None):
"""Render demo output page."""
# Duplicate of the POST handling in demo() above -- allows submitting a new file from the output page
if request.method == 'POST':
try:
# Grab file, and if by_scene is requested from website
file = request.files['file']
by_scene = 'by_scene' in request.form
# Check if filetype is allowed
if file and allowed_file(file.filename):
# Fix filename, save to file, get ext and determine type
file.filename = file.filename.replace(' ', '_')
file_path = save_file(file)
filename, ext = split_filename(file)
typ = determine_type(ext, by_scene)
if typ == 'image':
by_scene = False
# Generate caption/audio and redirect to demo_output page
if not by_scene:
caption = narrator.gen_caption(file_path,
beam_size=app.config[
'BEAM_SIZE'],
as_string=True,
by_scene=by_scene).capitalize()
cap_audio = filename + '.ogg'
narrator.gen_audio_file(
caption, app.config['UPLOAD_DIR'] + cap_audio)
return redirect(url_for('uploaded_file',
filename=file.filename,
cap_audio=cap_audio,
caption=caption,
typ=typ))
# Generate caption/audio by scene and redirect to demo_output
# page
captions, time_codes = narrator.gen_caption(file_path,
beam_size=app.config[
'BEAM_SIZE'],
as_string=True,
by_scene=by_scene)
scenes_dict = []
for i, caption in enumerate(captions):
narrator.gen_audio_file(caption,
app.config['UPLOAD_DIR'] + filename +
'.' + str(i) + '.ogg')
scenes_dict.append({
'time': time_codes[i],
'cap_audio': filename + '.' + str(i) + '.ogg',
'caption': caption.capitalize()
})
session['scenes_dict'] = scenes_dict
return redirect(url_for('uploaded_file',
filename=file.filename,
typ='scene',
caption='scene',
cap_audio='scene'))
except KeyError as e:
print(e)
return render_template('demo_output.html',
filename=filename,
typ=typ,
caption=caption,
cap_audio=cap_audio,
page='demo',
title=app.config['TITLE'])
@app.route('/uploads/<filename>')
def get_upload(filename):
"""Get path to file in upload directory."""
return send_from_directory(app.config['UPLOAD_DIR'], filename)
@app.route('/samples/<filename>')
def get_sample(filename):
"""Get path to file in samples directory."""
return send_from_directory(app.config['SAMPLES_DIR'], filename)
| 2.140625 | 2 |
tests/test_collection.py | benthomasson/ansible-events | 0 | 12792159 | <gh_stars>0
from ansible_events.collection import (
find_collection,
split_collection_name,
find_source,
load_rules,
)
def test_find_collection():
location = find_collection("community.general")
assert location is not None
def test_find_collection_eda():
location = find_collection("benthomasson.eda")
assert location is not None
def test_find_source():
location = find_source(*split_collection_name("benthomasson.eda.range"))
assert location is not None
def test_load_rules():
rules = load_rules(*split_collection_name("benthomasson.eda.hello_events"))
assert rules is not None
| 1.90625 | 2 |
tests/test_person.py | FOR-THE-HORDE-OPUS/Orgrimmar | 0 | 12792160 | def test_update_person_name():
return
def test_update_person_address():
return
| 1.179688 | 1 |
color_detection.py | smitdesai31/Image-Color-Detection | 0 | 12792161 | from tkinter import *
import cv2
import numpy as np
import urllib.request
import pandas as pd
from tkinter import filedialog
from PIL import ImageTk,Image
import pyperclip as pc
root = Tk()
root.title("Image Color Detection")
root.geometry("936x536+300+130")
root.configure(bg='#243B53')
image_path = ""
def open():
global image_path
root.filename = filedialog.askopenfilename(initialdir=r"C:\Users\7p\Desktop\temp pypro\python-project-color-detection",title="Select an image file", filetypes=(("All files","*.*"),("jpg files","*.jpg"),("png files","*.png")))
image_path = root.filename
print(image_path)
# open select2 btn image
selectimg2 = Image.open("C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn2.png")
#resize btn image
resized2 = selectimg2.resize((200,50),Image.ANTIALIAS)
finalimg2 = ImageTk.PhotoImage(resized2)
my_btn.configure(image=finalimg2,state=DISABLED)
my_btn.image=finalimg2
root.configure(bg='#363062')
return image_path
image_url=StringVar()
def urlimg():
imgurl = image_url.get()
url_response = urllib.request.urlopen(imgurl)
img_array = np.array(bytearray(url_response.read()), dtype=np.uint8)
urlimg.image = cv2.imdecode(img_array,-1)
image_url.set("")
root.destroy()
# open urllabel btn image
urllabel = Image.open("C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urllabel.png")
#resize btn image
resized3 = urllabel.resize((100,50),Image.ANTIALIAS)
finalimg3 = ImageTk.PhotoImage(resized3)
img_label = Label(root, image=finalimg3,borderwidth=0,bg='#243B53').place(x=150,y=260)
# open urlopen btn image
urlopen = Image.open("C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/urlopen.png")
#resize btn image
resized4 = urlopen.resize((200,50),Image.ANTIALIAS)
finalimg4 = ImageTk.PhotoImage(resized4)
url_btn=Button(root,image=finalimg4, command = urlimg,borderwidth=0,bg='#243B53').place(x=590,y=260)
img_entry = Entry(root,textvariable = image_url,width=12,font=('Roboto',26)).place(x=300,y=260)
# open select btn image
selectimg = Image.open("C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/selectbtn.png")
#resize btn image
resized = selectimg.resize((200,50),Image.ANTIALIAS)
finalimg = ImageTk.PhotoImage(resized)
my_btn = Button(root,image=finalimg,command=open,borderwidth=0,bg='#243B53')
my_btn.place(x=100,y=150)
# open start btn image
openimg = Image.open("C:/Users/7p/Desktop/temp pypro/python-project-color-detection/buttons/startbtn1.png")
#resize btn image
resized1 = openimg.resize((118,50),Image.ANTIALIAS)
finalimg1 = ImageTk.PhotoImage(resized1)
strt_btn = Button(root,image=finalimg1,command=root.quit,borderwidth=0,bg='#243B53').place(x=620,y=155)
root.mainloop()
if image_path == "":
img = urlimg.image
else :
#Reading the image with opencv
img = cv2.imread(image_path)
#declaring global variables (are used later on)
clicked = False
r = g = b = hexcode = xpos = ypos = 0
#Reading csv file with pandas and giving names to each column
index=["color","color_name","hex","R","G","B"]
csv = pd.read_csv('C:/Users/7p/Desktop/temp pypro/python-project-color-detection/colors.csv', names=index, header=None)
#function to calculate minimum distance from all colors and get the most matching color
def getColorName(R,G,B):
minimum = 10000
for i in range(len(csv)):
d = abs(R- int(csv.loc[i,"R"])) + abs(G- int(csv.loc[i,"G"]))+ abs(B- int(csv.loc[i,"B"]))
if(d<=minimum):
minimum = d
getColorName.cname = csv.loc[i,"color_name"]
getColorName.hexcode = csv.loc[i,"hex"]
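# Worked example (illustrative): for a clicked pixel with R=200, G=30, B=30 the
# loop keeps the CSV row minimising |200 - R_row| + |30 - G_row| + |30 - B_row|,
# i.e. the named colour closest in Manhattan distance in RGB space.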
#function to get x,y coordinates of mouse double click
def draw_function(event, x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
global b,g,r,xpos,ypos, clicked
clicked = True
xpos = x
ypos = y
b,g,r = img[y,x]
b = int(b)
g = int(g)
r = int(r)
cv2.namedWindow('Image Color Detection')
cv2.setMouseCallback('Image Color Detection',draw_function)
while(1):
cv2.imshow("Image Color Detection",img)
if (clicked):
#scale text according to image size
imageWidth = img.shape[0]
imageHeight = img.shape[1]
fontScale = min(imageWidth,imageHeight)/(800)
#cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle
cv2.rectangle(img,(50,10), (max(imageWidth,imageHeight),50), (b,g,r), -1)
getColorName(r,g,b)
#Creating text string to display( Color name and RGB values )
text = getColorName.cname + ' R='+ str(r) + ' G='+ str(g) + ' B='+ str(b) +" "+ getColorName.hexcode
#copying color code to clipboard
pc.copy(getColorName.hexcode)
#scale text according to image size
imageWidth = img.shape[0]
imageHeight = img.shape[1]
fontScale = min(imageWidth,imageHeight)/(800)
#cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType )
cv2.putText(img, text,(50,40),2,fontScale,(255,255,255),1,cv2.LINE_AA)
#For very light colours we will display text in black colour
if(r+g+b>=600):
cv2.putText(img, text,(50,40),2,fontScale,(0,0,0),1,cv2.LINE_AA)
clicked=False
#Break the loop when User hits 'enter' key
if cv2.waitKey(20) & 0xFF ==13:
break
cv2.destroyAllWindows()
| 3.109375 | 3 |
GM_brightness_metric/test/test_read_video.py | quosi/image_analysis | 0 | 12792162 | import pytest, logging
from read_video import read_video
def test_read_video():
test_data = "/Users/pepper/Projekte/PythonProjects/GM_brightness_metric/resources/video/Brosserness_4sec_h264_1920x1080_24fps_2Ch-stereo.mp4"
#logging.info('ERROR')
i = 0
for frame in read_video(test_data):
logging.info("Output in loop")
assert frame is not None, "fail"
i+=1
assert i>0
| 2.34375 | 2 |
hackerrank/xor-sequence.py | ichko/DataStructures | 3 | 12792163 | <reponame>ichko/DataStructures
#!/bin/python3
"""
Xor-sequence
SRC - https://www.hackerrank.com/challenges/xor-se/problem
Sample Input 0:
3
2 4
2 8
5 9
Sample Output 0:
7
9
15
"""
"""
11110 11110 30 - the number
11100 00010 2 - two
11010 11000 24 - the number = the number - 6
11000 00000 0 - zero
10110 10110 22 - the number = the number - 2
10100 00010 2 - two
10010 10000 16 - the number = the number - 6
10000 00000 0 - zero
01110 01110 14 - the number = the number - 2
01100 00010 2 - two
01010 01000 8 - the number = the number - 6
01000 00000 0 - zero
00110 00110 6 - the number = the number - 2
00100 00010 2 - two (the answer)
"""
import math
import os
import random
import re
import sys
MAX_BITS = 17
def count_ones(n, bit_id):
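    # Count how many integers in [0, n] have bit bit_id set: the bit cycles with period 2**(bit_id+1), 2**bit_id zeros followed by 2**bit_id ones.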
ones_in_group = 2**bit_id
group_size = ones_in_group * 2
num_groups = (n + 1) // group_size
ones_in_groups = num_groups * ones_in_group
rem_ones = 0
rem = (n + 1) % group_size
if rem > group_size // 2:
rem_ones = rem - group_size // 2
return ones_in_groups + rem_ones
def get_bit_vector_ones_counts(n, max_bit):
ones_cnts = []
for i in range(0, max_bit + 1):
num_ones = count_ones(n, bit_id=i)
ones_cnts.append(num_ones)
return ones_cnts
def A_n(n):
n_bits = get_bit_vector_ones_counts(n, max_bit=MAX_BITS)
result = 0
for i, num_ones in enumerate(n_bits):
if num_ones % 2 != 0:
result += 2**i
return result
def xor_sequence(l, r):
l_bits = get_bit_vector_ones_counts(l - 1, max_bit=MAX_BITS)
r_bits = get_bit_vector_ones_counts(r, max_bit=MAX_BITS)
result = 0
for i, (l, r) in enumerate(zip(l_bits, r_bits)):
if (r - l) % 2 != 0:
result += 2**i
return result
def xor_sequence_2(l, r):
result = 0
for i in range(l + 1, r + 1, 2):
result ^= i
if (r - l + 1) % 2 != 0:
return A_n(r) ^ result
return result
def xor_sequence_3(l, r):
l_bits = get_bit_vector_ones_counts(l + 1, max_bit=MAX_BITS)
r_bits = get_bit_vector_ones_counts(r, max_bit=MAX_BITS)
result = 0
for i, (l, r) in enumerate(zip(l_bits, r_bits)):
l //= 2
r //= 2
if (r - l) % 2 != 0:
result += 2**i
return result
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
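    # Debug/scratch exploration of the XOR pattern for a fixed range (l, r) = (2, 31); the judged output is written by the query loop below.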
l, r = 2, 31
a_l = format(A_n(l), '04b')
a_r = format(A_n(r), '04b')
s = 0
for j in range(r, l, -2):
s ^= j
jf = format(j, '05b')
sf = format(s, '05b')
print(j, jf, sf, s)
if (l - r + 1) % 2 != 0:
al = A_n(l)
s ^= al
print(j, jf, sf, s)
for q_itr in range(q):
lr = input().split()
l = int(lr[0])
r = int(lr[1])
result = xor_sequence_3(l, r)
fptr.write(str(result) + '\n')
fptr.close()
| 3.15625 | 3 |
examples/pyglet_hello_world.py | rdeioris/compushady | 17 | 12792164 | import pyglet
import platform
import struct
from ctypes import addressof,pointer
import ctypes
from compushady import HEAP_UPLOAD, Swapchain, Compute, Texture2D, Buffer
from compushady.formats import B8G8R8A8_UNORM
from compushady.shaders import hlsl
if platform.system() != 'Windows':
raise Exception('only Windows is supported for this example')
window = pyglet.window.Window()
swapchain = Swapchain(window._hwnd, B8G8R8A8_UNORM, 3)
target = Texture2D(window.width, window.height, B8G8R8A8_UNORM)
clear_screen = Compute(hlsl.compile("""
RWTexture2D<float4> target : register(u0);
[numthreads(8,8,1)]
void main(uint3 tid : SV_DispatchThreadID)
{
target[tid.xy] = float4(1, 0, 0, 1);
}
"""), uav=[target])
constant_buffer = Buffer(8, HEAP_UPLOAD)
quad = Compute(hlsl.compile("""
struct Quad
{
uint x;
uint y;
};
ConstantBuffer<Quad> quad : register(b0);
RWTexture2D<float4> target : register(u0);
[numthreads(8,8,1)]
void main(uint3 tid : SV_DispatchThreadID)
{
target[tid.xy + uint2(quad.x, quad.y)] = float4(0, 1, 1, 1);
}
"""), cbv=[constant_buffer], uav=[target])
x = 0
y = 0
def update(dt):
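    # Advance the quad one pixel per tick on both axes, wrap at the window edges, and upload the new offset to the constant buffer.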
global x, y
x += 1
y += 1
if x > window.width:
x = 0
if y > window.height:
y = 0
constant_buffer.upload(struct.pack('II', x, y))
@window.event
def on_draw():
clear_screen.dispatch(window.width // 8, window.height // 8, 1)
quad.dispatch(1, 1, 1)
swapchain.present(target)
pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run()
| 2.328125 | 2 |
core/migrations/remove_provider_dns_server_ip_model.py | simpsonw/atmosphere | 197 | 12792165 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-07 21:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', 'remove_atmosphereuser_selected_identity'),
]
operations = [
migrations.AlterUniqueTogether(
name='providerdnsserverip',
unique_together=set([]),
),
migrations.RemoveField(
model_name='providerdnsserverip',
name='provider',
),
migrations.DeleteModel(name='ProviderDNSServerIP', ),
]
| 1.546875 | 2 |
flyer/decoder.py | FIXFlyer/pyflyer | 2 | 12792166 |
#! /usr/bin/env python
#-----------------------------------------------------------------------
# COPYRIGHT_BEGIN
# Copyright (C) 2017, FixFlyer, LLC.
# All rights reserved.
# COPYRIGHT_END
#-----------------------------------------------------------------------
"""Protocol decoder."""
import flyer
MARKER = "50015=EOF\x01"
MARKER_LEN = len(MARKER)
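# Messages are streams of tag=value fields delimited by SOH (\x01) and terminated by the "50015=EOF" marker.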
class Decoder(object):
"""Protocol decoder."""
def __init__(self):
self._listener = None
self._buf = ""
return
def set_listener(self, listener):
self._listener = listener
return
def receive_bytes(self, buf, buflen):
self._buf += buf[:buflen]
return
def get_message(self):
pos = self._buf.find(MARKER)
if pos < 0:
return
msg = self._buf[0:pos + MARKER_LEN]
self._buf = self._buf[pos + MARKER_LEN:]
return msg
def dispatch(self, msg):
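        # Tag 50001 carries the message type; COMMON messages are routed further by tag 50011 (event type).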
msg_type = getInt32("50001=", msg, "\x01")
if msg_type < 0:
#FIXME: log
return
if msg_type == flyer.protocol.PAYLOAD_MESSAGE_TYPE:
return self.decode_payload_message(msg)
elif msg_type == flyer.protocol.HEARTBEAT_MESSAGE_TYPE:
return self.decode_heartbeat_message(msg)
elif msg_type == flyer.protocol.COMMIT_MESSAGE_TYPE:
return self.decode_commit_message(msg)
elif msg_type == flyer.protocol.COMMON_MESSAGE_TYPE:
event_type = getInt32("50011=", msg, "\x01")
if event_type < 0:
#FIXME: log
return
if event_type == flyer.protocol.LOGON_RESPONSE_EVENT_ID:
return self.decode_logon_response_message(msg)
elif event_type == flyer.protocol.SESSION_LOGON_EVENT_ID:
return self.decode_session_logon_message(msg)
elif event_type == flyer.protocol.SESSION_LOGOUT_EVENT_ID:
return self.decode_session_logout_message(msg)
elif event_type == flyer.protocol.RESEND_EVENT_ID:
return self.decode_resend_message(msg)
elif event_type == flyer.protocol.ERROR_EVENT_ID:
return self.decode_error_message(msg)
else:
#FIXME: log
return
else:
#FIXME: log
return
def decode_payload_message(self, msg):
return
def decode_heartbeat_message(self, msg):
event = OnHeartbeatEvent()
event.id = getString("50002=", msg, "\x01")
self._listener.on_heartbeat(event)
return
def decode_commit_message(self, msg):
return
def decode_logon_response_message(self, msg):
event = OnLogonEvent()
result = getString("Success=", msg, "\r\n")
event.success = (result == "true")
self._listener.on_logon(event)
return
def decode_session_logon_message(self, msg):
return
def decode_session_logout_message(self, msg):
return
def decode_resend_message(self, msg):
return
def decode_error_message(self, msg):
return
| 2.296875 | 2 |
src/xbrief/margin/matrix_margin/__init__.py | pydget/xbrief | 0 | 12792167 |
from .matrix_margin import MatrixMargin
from .sizing import sizing
| 0.976563 | 1 |
event_input_cap_example.py | dbgraybeal/pysep | 6 | 12792168 | import obspy
import read_event_obspy_file as reof
from getwaveform import *
def get_ev_info(ev_info,iex):
# ===============================================================
# SilwalTape2016 example event (Anchorage)
if iex == 0:
ev_info.use_catalog = 0
ev_info.otime = obspy.UTCDateTime("2009-04-07T20:12:55.351")
ev_info.min_dist = 0
ev_info.max_dist = 300
ev_info.tbefore_sec = 100
ev_info.tafter_sec = 300
        #output all processing steps
ev_info.ifverbose = True
#keep stations with missing components and fill the missing component with a null trace (MPEN)
#Be sure to set the null component to 0 in the weight file when running cap
#ev_info.icreateNull = 1
ev_info.icreateNull = 0
#RAW and ENZ files can be used when checking if you are receiving all the data ($PYSEP/check_getwaveform.bash)
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
#ev_info.isave_raw = True
#ev_info.isave_raw_processed = True
ev_info.isave_ENZ = False
#ev_info.isave_ENZ = True
#ev_info.min_lat = 59
#ev_info.max_lat = 62
#ev_info.min_lon = -152
#ev_info.max_lon = -147
# default list of Alaska networks
# note 1: cannot use '*' because of IM
# note 2: may want to exclude the mid-band AV network
# note 3: these are temporary:
# XE BEAAR 1999
# XR ARCTIC 2004
# XZ STEEP 2005
# YV MOOS 2006
# XV FLATS 2014
# <NAME> 2015
# XG WVF 2016
# [7C MMEP 2015]
# TA
#ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,XV,XZ,YV'
#ev_info.network = 'AK' # for testing
ev_info.network = 'AK,AT,AV,CN,II,IU,US,XM,TA,XE,XR,XZ,YV,XV,ZE,XG'
ev_info.channel = 'BH?'
ev_info.use_catalog = 0
ev_info.elat = 61.45420
ev_info.elon = -149.7428
ev_info.edep = 33033.60
# ev_info.rlat = 61.45420
# ev_info.rlon = -149.7428
# ev_info.rtime = obspy.UTCDateTime("2009-04-07T20:12:55.351")
ev_info.emag = 4.6
ev_info.resample_freq = 50
ev_info.scale_factor = 100
#ev_info.phase_window = False
#-------for specfem------------
#ev_info.tbefore_sec = 0
#ev_info.resample_TF = False
#ev_info.scale_factor = 1
#ev_info.outformat = 'DISP'
#------------------------------
return(ev_info)
| 2.21875 | 2 |
rubato/utils/COL_TYPE.py | tinmarr/Alien-Fisher-Man | 0 | 12792169 |
from enum import Enum
class COL_TYPE(Enum):
ELASTIC = 1
STATIC = 2
| 1.882813 | 2 |
spo/spo/doctype/medizinischer_bericht/medizinischer_bericht.py | libracore/spo | 0 | 12792170 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, libracore and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.data import formatdate
class MedizinischerBericht(Document):
pass
# def validate(self):
# for ausgangslage in self.ausgangslage:
# if ausgangslage.krankengeschichte:
# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace("<br>", "")
# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace("</div>", "<br>")
# ausgangslage.krankengeschichte = ausgangslage.krankengeschichte.replace("<div>", "")
# if ausgangslage.bemerkung:
# ausgangslage.bemerkung = ausgangslage.bemerkung.replace("<br>", "")
# ausgangslage.bemerkung = ausgangslage.bemerkung.replace("</div>", "<br>")
# ausgangslage.bemerkung = ausgangslage.bemerkung.replace("<div>", "")
# for korrespondenz in self.korrespondenz:
# if korrespondenz.wortlaut:
# korrespondenz.wortlaut = korrespondenz.wortlaut.replace("<br>", "")
# korrespondenz.wortlaut = korrespondenz.wortlaut.replace("</div>", "<br>")
# korrespondenz.wortlaut = korrespondenz.wortlaut.replace("<div>", "")
# if korrespondenz.bemerkung:
# korrespondenz.bemerkung = korrespondenz.bemerkung.replace("<br>", "")
# korrespondenz.bemerkung = korrespondenz.bemerkung.replace("</div>", "<br>")
# korrespondenz.bemerkung = korrespondenz.bemerkung.replace("<div>", "")
@frappe.whitelist()
def get_deckblat_data(mandat):
data = {}
if mandat:
mandat = frappe.get_doc("Mandat", mandat)
if mandat.kontakt:
patienten_kontakt = frappe.get_doc("Contact", mandat.kontakt)
data["name_klient"] = patienten_kontakt.first_name + " " + patienten_kontakt.last_name
data["geburtsdatum_klient"] = formatdate(string_date=patienten_kontakt.geburtsdatum, format_string='dd.mm.yyyy')
else:
data["name_klient"] = ''
data["geburtsdatum_klient"] = ''
employee = frappe.db.sql("""SELECT `name` FROM `tabEmployee` WHERE `user_id` = '{owner}'""".format(owner=frappe.session.user), as_dict=True)
if len(employee) > 0:
data["beraterin"] = employee[0].name
else:
data["beraterin"] = ''
if mandat.rsv:
data["rsv"] = mandat.rsv
else:
data["rsv"] = ''
if mandat.rsv_kontakt:
data["rsv_kontakt"] = mandat.rsv_kontakt
else:
data["rsv_kontakt"] = ''
return data
else:
return False | 2.078125 | 2 |
waterbutler/core/streams/zip.py | laurenrevere/waterbutler | 0 | 12792171 |
import asyncio
import binascii
import struct
import time
import zipfile
import zlib
from waterbutler.core.streams import BaseStream
from waterbutler.core.streams import MultiStream
from waterbutler.core.streams import StringStream
class ZipLocalFileDescriptor(BaseStream):
"""The descriptor (footer) for a local file in a zip archive
Note: This class is tightly coupled to ZipStreamReader, and should not be
used separately
"""
def __init__(self, file):
super().__init__()
self.file = file
@property
def size(self):
return 0
@asyncio.coroutine
def _read(self, *args, **kwargs):
"""Create 16 byte descriptor of file CRC, file size, and compress size"""
self._eof = True
return self.file.descriptor
class ZipLocalFileData(BaseStream):
"""A thin stream wrapper, used to update a ZipLocalFile as chunks are read
Note: This class is tightly coupled to ZipStreamReader, and should not be
used separately
"""
def __init__(self, file, stream, *args, **kwargs):
self.file = file
self.stream = stream
self._buffer = bytearray()
super().__init__(*args, **kwargs)
@property
def size(self):
return 0
@asyncio.coroutine
def _read(self, n=-1, *args, **kwargs):
if callable(self.stream):
self.stream = yield from (self.stream())
ret = self._buffer
while (n == -1 or len(ret) < n) and not self.stream.at_eof():
chunk = yield from self.stream.read(n, *args, **kwargs)
# Update file info
self.file.original_size += len(chunk)
self.file.zinfo.CRC = binascii.crc32(chunk, self.file.zinfo.CRC)
# compress
compressed = self.file.compressor.compress(chunk)
compressed += self.file.compressor.flush(
zlib.Z_FINISH if self.stream.at_eof() else zlib.Z_SYNC_FLUSH
)
# Update file info
self.file.compressed_size += len(compressed)
ret += compressed
# buffer any overages
if n != -1 and len(ret) > n:
self._buffer = ret[n:]
ret = ret[:n]
else:
self._buffer = bytearray()
# EOF is the buffer and stream are both empty
if not self._buffer and self.stream.at_eof():
self.feed_eof()
return bytes(ret)
class ZipLocalFile(MultiStream):
"""A local file in a zip archive
Note: This class is tightly coupled to ZipStreamReader, and should not be
used separately
"""
def __init__(self, file_tuple):
filename, stream = file_tuple
filename = filename.strip('/')
# Build a ZipInfo instance to use for the file's header and footer
self.zinfo = zipfile.ZipInfo(
filename=filename,
date_time=time.localtime(time.time())[:6],
)
self.zinfo.compress_type = zipfile.ZIP_DEFLATED
self.zinfo.external_attr = 0o600 << 16
self.zinfo.header_offset = 0
self.zinfo.flag_bits |= 0x08
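        # general-purpose flag bit 3: CRC and sizes are unknown up front and follow the file data in a descriptor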
# Initial CRC: value will be updated as file is streamed
self.zinfo.CRC = 0
# define a compressor
self.compressor = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED,
-15,
)
# meta information - needed to build the footer
self.original_size = 0
self.compressed_size = 0
super().__init__(
StringStream(self.local_header),
ZipLocalFileData(self, stream),
ZipLocalFileDescriptor(self),
)
@property
def local_header(self):
"""The file's header, for inclusion just before the content stream"""
return self.zinfo.FileHeader(zip64=False)
@property
def directory_header(self):
"""The file's header, for inclusion in the archive's central directory
"""
dt = self.zinfo.date_time
# modification date/time, in MSDOS format
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra_data = self.zinfo.extra
filename, flag_bits = self.zinfo._encodeFilenameFlags()
centdir = struct.pack(
zipfile.structCentralDir,
zipfile.stringCentralDir,
self.zinfo.create_version,
self.zinfo.create_system,
self.zinfo.extract_version,
self.zinfo.reserved,
flag_bits,
self.zinfo.compress_type,
dostime, # modification time
dosdate,
self.zinfo.CRC,
self.compressed_size,
self.original_size,
len(self.zinfo.filename.encode('utf-8')),
len(extra_data),
len(self.zinfo.comment),
0,
self.zinfo.internal_attr,
self.zinfo.external_attr,
self.zinfo.header_offset,
)
return centdir + filename + extra_data + self.zinfo.comment
@property
def descriptor(self):
"""Local file data descriptor"""
fmt = '<4sLLL'
signature = b'PK\x07\x08' # magic number for data descriptor
return struct.pack(
fmt,
signature,
self.zinfo.CRC,
self.compressed_size,
self.original_size,
)
@property
def total_bytes(self):
"""Length, in bytes, of output. Includes header and footer
Note: This should be access after the file's data has been streamed.
"""
return (
len(self.local_header) +
self.compressed_size +
len(self.descriptor)
)
class ZipArchiveCentralDirectory(BaseStream):
"""The central directory for a zip archive
Note: This class is tightly coupled to ZipStreamReader, and should not be
used separately
"""
def __init__(self, files, *args, **kwargs):
super().__init__()
self.files = files
@property
def size(self):
return 0
@asyncio.coroutine
def _read(self, n=-1):
file_headers = []
cumulative_offset = 0
for file in self.files:
file.zinfo.header_offset = cumulative_offset
file_headers.append(file.directory_header)
cumulative_offset += file.total_bytes
file_headers = b''.join(file_headers)
count = len(self.files)
endrec = struct.pack(
zipfile.structEndArchive,
zipfile.stringEndArchive,
0,
0,
count,
count,
len(file_headers),
cumulative_offset,
0,
)
self.feed_eof()
return b''.join((file_headers, endrec))
class ZipStreamReader(MultiStream):
"""Combines one or more streams into a single, Zip-compressed stream"""
def __init__(self, *streams):
# Each incoming stream should be wrapped in a _ZipFile instance
streams = [ZipLocalFile(each) for each in streams]
# Append a stream for the archive's footer (central directory)
streams.append(ZipArchiveCentralDirectory(streams.copy()))
super().__init__(*streams)
| 2.65625 | 3 |
img2art.py | kartikanand/img2art | 0 | 12792172 | import shutil
import sys
from PIL import Image
def get_term_width():
""" return terminal width
this function depends upon shutil.get_terminal_size
this works only on Python >= 3.3
"""
return shutil.get_terminal_size().columns
def get_aspect_ratio(img):
""" return the aspect ratio of given image
ar = width//height
return an int, we don't care about exact ratios
"""
width, height = img.size
aspect_ratio = width//height
if aspect_ratio == 0:
aspect_ratio = 1
return aspect_ratio
def get_height(width, aspect_ratio):
""" return height with respect to given aspect ratio """
return width//aspect_ratio
def resize_img(img):
""" return a resized image
resize acc. to given terminal width
keeping in mind the original aspect ratio
"""
term_width = get_term_width()
# divide by 2 because we use 2 characters per pixel
width = term_width//2
aspect_ratio = get_aspect_ratio(img)
height = get_height(width, aspect_ratio)
return img.resize((width, height))
def draw_ascii(img):
""" draw ascii art from the provided image
use # for black
use . for white
before drawing, convert the image to black and white
then resize it according to terminal width
"""
# convert image to black and white
img = img.convert('L')
# resize image to match terminal width and aspect ratio
img = resize_img(img)
width, height = img.size
for y in range(height):
for x in range(width):
if img.getpixel((x, y)) < 15:
print('# ', end='')
else:
print('. ', end='')
print()
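# Usage: python img2art.py <image-file>  (prints an ASCII rendering scaled to the current terminal width)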
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Please enter an image name as argument')
sys.exit(1)
img_file = sys.argv[1]
try:
img = Image.open(img_file)
draw_ascii(img)
except IOError:
print('Enter correct file')
sys.exit(1)
| 3.828125 | 4 |
functions/Melody.py | Skentir/CSC617M | 0 | 12792173 | class Melody():
def __init__(self, notes):
        self.notes = notes | 1.757813 | 2 |
rpiatipo/EventsTest.py | m-c0d3/rpiatipo | 0 | 12792174 | from unittest import TestCase
from unittest.mock import patch, Mock
from rpiatipo.Events import Event, EventService
class EventsTest(TestCase):
@patch('rpiatipo.Events.EventService')
def setUp(self, MockEventService):
self.event = Event(type="test", data={"data": 1})
self.eventService = MockEventService()
self.eventService.create.return_value = self.event
def test_CreateEvent_EventService(self):
response = self.eventService.create()
self.assertIsInstance(response, Event)
def test_GetIdEvent_Success_EventService(self):
self.eventService.getId.side_effect = self.side_effect("1")
response = self.eventService.getId()
self.assertIsInstance(response, Event)
def test_GetIdEvent_NotFound_EventService(self):
self.eventService.getId.side_effect = self.side_effect("0")
response = self.eventService.getId()
self.assertNotIsInstance(response, Event)
def side_effect(self, id):
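        # A one-element list makes the mocked getId return the Event on the next call; None falls back to the Mock's default return value.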
if (id=="1"):
return [self.event]
else:
return None | 2.890625 | 3 |
nn_dataflow/tests/unit_test/test_resource.py | joeshow79/nn_dataflow | 0 | 12792175 | """ $lic$
Copyright (C) 2016-2019 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from nn_dataflow.core import NodeRegion
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import Resource
class TestResource(unittest.TestCase):
''' Tests for Resource. '''
def setUp(self):
self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0),
type=NodeRegion.PROC)
self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0),
type=NodeRegion.DRAM)
self.src_data_region = NodeRegion(dim=PhyDim2(2, 1),
origin=PhyDim2(0, 0),
type=NodeRegion.DRAM)
self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1),
origin=PhyDim2(0, 1),
type=NodeRegion.DRAM)
def test_valid_args(self):
''' Valid arguments. '''
resource = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region')
self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region')
self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array')
self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf')
self.assertEqual(resource.size_regf, 512, 'size_regf')
self.assertEqual(resource.array_bus_width, 8, 'array_bus_width')
self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth')
self.assertFalse(resource.no_time_mux, 'no_time_mux')
def test_invalid_proc_region(self):
''' Invalid proc_region. '''
with self.assertRaisesRegexp(TypeError, 'Resource: .*proc_region.*'):
_ = Resource(proc_region=PhyDim2(2, 2),
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_proc_region_dram(self):
''' Invalid proc_region with type DRAM. '''
with self.assertRaisesRegexp(ValueError, 'Resource: .*proc_.*type.*'):
_ = Resource(proc_region=NodeRegion(dim=PhyDim2(2, 2),
origin=PhyDim2(0, 0),
type=NodeRegion.DRAM),
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_dram_region(self):
''' Invalid dram_region. '''
with self.assertRaisesRegexp(TypeError, 'Resource: .*dram_region.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=PhyDim2(2, 2),
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_dram_region_proc(self):
''' Invalid dram_region with type DRAM. '''
with self.assertRaisesRegexp(ValueError, 'Resource: .*dram_.*type.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=NodeRegion(dim=PhyDim2(2, 2),
origin=PhyDim2(0, 0),
type=NodeRegion.PROC),
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_data_region(self):
''' Invalid src/dst_proc_region. '''
with self.assertRaisesRegexp(TypeError, 'Resource: .*src_data_.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=PhyDim2(2, 1),
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
with self.assertRaisesRegexp(TypeError, 'Resource: .*dst_data_.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=PhyDim2(2, 1),
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_dim_array(self):
''' Invalid dim_array. '''
with self.assertRaisesRegexp(TypeError, 'Resource: .*dim_array.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_size_gbuf(self):
''' Invalid size_gbuf. '''
with self.assertRaisesRegexp(TypeError, 'Resource: .*size_gbuf.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=(131072,),
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_size_regf(self):
''' Invalid size_regf. '''
with self.assertRaisesRegexp(TypeError, 'Resource: .*size_regf.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=(512,),
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_array_bus_width(self):
''' Invalid array_bus_width. '''
with self.assertRaisesRegexp(TypeError,
'Resource: .*array_bus_width.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=1.2,
dram_bandwidth=128,
no_time_mux=False,
)
with self.assertRaisesRegexp(ValueError,
'Resource: .*array_bus_width.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=-2,
dram_bandwidth=128,
no_time_mux=False,
)
with self.assertRaisesRegexp(ValueError,
'Resource: .*array_bus_width.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=0,
dram_bandwidth=128,
no_time_mux=False,
)
def test_invalid_dram_bandwidth(self):
''' Invalid dram_bandwidth. '''
with self.assertRaisesRegexp(TypeError,
'Resource: .*dram_bandwidth.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=None,
no_time_mux=False,
)
with self.assertRaisesRegexp(ValueError,
'Resource: .*dram_bandwidth.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=-3,
no_time_mux=False,
)
with self.assertRaisesRegexp(ValueError,
'Resource: .*dram_bandwidth.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=0,
no_time_mux=False,
)
def test_invalid_no_time_mux(self):
''' Invalid no_time_mux. '''
with self.assertRaisesRegexp(TypeError,
'Resource: .*no_time_mux.*'):
_ = Resource(proc_region=self.proc_region,
dram_region=self.dram_region,
src_data_region=self.src_data_region,
dst_data_region=self.dst_data_region,
dim_array=PhyDim2(16, 16),
size_gbuf=131072,
size_regf=512,
array_bus_width=8,
dram_bandwidth=128,
no_time_mux=None,
)
| 2.15625 | 2 |
eworkshop/services/models/service_type.py | frankfern/eWorkshop-api | 0 | 12792176 |
from django.db import models
from eworkshop.utils.models import TimeModel
class ServiceType(TimeModel):
service_name = models.CharField(max_length=10, blank=False)
def __str__(self) -> str:
return self.service_name
| 2.21875 | 2 |
test.py | shl3807/ib_insync_ | 4 | 12792177 | from ib_insync import *
ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)
contract = Forex('EURUSD')
bars = ib.reqHistoricalData(contract, endDateTime='', durationStr='30 D',
barSizeSetting='1 hour', whatToShow='MIDPOINT', useRTH=True)
# convert to pandas dataframe:
df = util.df(bars)
print(df[['date', 'open', 'high', 'low', 'close']]) | 2.46875 | 2 |
dpm/transforms/affine.py | nextBillyonair/DPM | 1 | 12792178 | from .transform import Transform
from torch.nn import Parameter
import torch
class Affine(Transform):
def __init__(self, loc=0.0, scale=1.0, learnable=True):
super().__init__()
if not isinstance(loc, torch.Tensor):
loc = torch.tensor(loc).view(1, -1)
if not isinstance(scale, torch.Tensor):
scale = torch.tensor(scale).view(1, -1)
self.loc = loc.float()
self.scale = scale.float()
self.n_dims = len(loc)
if learnable:
self.loc = Parameter(self.loc)
self.scale = Parameter(self.scale)
def forward(self, x):
return self.loc + self.scale * x
def inverse(self, y):
return (y - self.loc) / self.scale
def log_abs_det_jacobian(self, x, y):
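        # log|dy/dx| = log|scale| per element, summed over the last (event) dimension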
return torch.log(torch.abs(self.scale.expand(x.size()))).sum(-1)
def get_parameters(self):
return {'type':'affine', 'loc':self.loc.detach().numpy(),
'scale':self.scale.detach().numpy()}
| 2.359375 | 2 |
entity/turret/Turret.py | SamJakob/SpaceInvaders2 | 0 | 12792179 |
import pygame
class Turret:
def __init__(self, MovementControls):
self.speed = 40
self.lives = 3
self.score = 0
self.spriteImage = "assets/sprites/turret/shooter.png"
self.sprite = pygame.image.load(self.spriteImage)
spriteRect = self.sprite.get_rect()
self.size = (spriteRect.width, spriteRect.height)
self.x = 0
self.y = 0
self.movementControls = MovementControls
def getScore(self):
return self.score
def setScore(self, score):
self.score = score
def addScore(self, scoreDelta):
self.score += scoreDelta
def removeScore(self, negativeScoreDelta):
self.score -= negativeScoreDelta
def getLives(self):
return self.lives
def setLives(self, lives):
self.lives = lives
def getSprite(self):
return self.sprite
def setX(self, x):
self.x = x
def getX(self):
return self.x
def setY(self, y):
self.y = y
def getY(self):
return self.y
def getSize(self):
return self.size
def getLocation(self):
return (self.x, self.y)
def getRect(self):
return pygame.Rect(
self.x,
self.y,
self.getSize()[0],
self.getSize()[1]
)
def setMovementControls(self, MovementControls):
self.movementControls = MovementControls
def getMovementControls(self):
return self.movementControls
| 2.96875 | 3 |
tests/calculate_syndrome_tests.py | Alasdair-Macindoe/HammingCodes | 0 | 12792180 |
"""
Lines 5 and 6 were adapted from SO code:
http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python
"""
import sys
sys.path.insert(0, '..')
""" END """
import main as program
import pytest
def test_example():
""" From Lecture 7 - 01 """
v = program.binary_matrix([[1,1,0,0,0,0,0]])
H = program.get_parity_check(3)
syndromes = program.create_syndrome_dict(7,3)
vH = program.calculate_syndrome(v,H)
assert vH == [0,1,1]
| 2.703125 | 3 |
automation_infra/plugins/run.py | AnyVisionltd/automation-infra | 6 | 12792181 |
import socket
import logging
from automation_infra.plugins import background, parallel
import tempfile
import os
import subprocess
from subprocess import CalledProcessError
class Run(object):
def __init__(self, ssh_client):
self._ssh_client = ssh_client
self._logger = logging.getLogger('ssh')
def script(self, bash_script, output_timeout=20 * 60):
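        # Convenience wrapper around script_v2 that returns only the captured stdout.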
return self.script_v2(bash_script, output_timeout).stdout
def script_v2(self, bash_script, output_timeout=20 * 60):
self._logger.debug("Running bash script:\n\n%(bash_script)s\n", dict(bash_script=bash_script))
command = "\n".join([
"sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'",
bash_script,
"RACKATTACK_SSH_RUN_SCRIPT_EOF\n"])
return self.execute(command, output_timeout)
def execute(self, command, output_timeout=20 * 60):
transport = self._ssh_client.get_transport()
chan = transport.open_session()
try:
chan.exec_command(command)
chan.settimeout(output_timeout)
stdin = chan.makefile('wb', -1)
stdout = chan.makefile('rb', -1)
stderr = chan.makefile_stderr('rb', -1)
stdin.close()
output = self._read_output(stdout, output_timeout)
status = chan.recv_exit_status()
error = stderr.read().decode('utf-8')
completed_process = subprocess.CompletedProcess(command, status)
completed_process.stderr = error
completed_process.stdout = output
stdout.close()
stderr.close()
self._logger.debug("SSH command: %(command)s", dict(command=command))
self._logger.debug("Execution output: %(output)s", dict(output=output))
if status != 0:
self._logger.debug(f'command {command} failed with errorcode {completed_process.returncode}: \nstdout: {completed_process.stdout} \nstderr: {completed_process.stderr}')
raise CalledProcessError(completed_process.returncode,
completed_process.args,
completed_process.stdout,
completed_process.stderr)
return completed_process
finally:
chan.close()
def _read_output(self, stdout, output_timeout):
output_array = []
try:
while True:
segment = stdout.read().decode('utf-8')
if segment == "":
break
output_array.append(segment)
except socket.timeout:
output = "".join(output_array)
e = socket.timeout(
"Timeout executing, no input for timeout of '%s'. Partial output was\n:%s" % (
output_timeout, output))
e.output = output
raise e
return "".join(output_array)
def _exec(self, command):
transport = self._ssh_client.get_transport()
chan = transport.open_session()
try:
chan.exec_command(command)
status = chan.recv_exit_status()
if status != 0:
raise subprocess.CalledProcessError(status, command)
finally:
chan.close()
def background_script(self, bash_script):
random_base = tempfile.mktemp(prefix='background', dir='/tmp/')
pid_filename = random_base + ".pid"
out_filename = random_base + ".out"
err_filename = random_base + ".err"
status_filename = random_base + ".retcode"
command = "\n".join([
"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &",
"(%(bash_script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s" % dict(
bash_script=bash_script,
out=out_filename,
err=err_filename,
pid=pid_filename,
status=status_filename),
"RACKATTACK_SSH_RUN_SCRIPT_EOF\n"])
try:
self._exec(command)
except CalledProcessError as e:
raise Exception("Failed running '%s', status '%s'" % (bash_script, e.returncode))
return background.Background(self, bash_script, pid_filename, out_filename, err_filename, status_filename)
def _parallel_commands(self, base_dir, scripts, max_jobs, command_suffix=""):
max_jobs = max_jobs or 0
script_commands = ["((%(script)s 1>%(out)s 2>%(err)s& echo $!>%(pid)s;wait $!);echo $?>%(status)s)" %
dict(script=script,
out=parallel.Parallel.outfile(base_dir, i),
err=parallel.Parallel.errfile(base_dir, i),
status=parallel.Parallel.statusfile(base_dir, i),
pid=parallel.Parallel.pidfile(base_dir, i))
for i, script in enumerate(scripts)]
joined_script_commands = "\n".join(script_commands)
return "\n".join(["parallel --no-notice --jobs=%d << 'PARALLEL_SCRIPT' %s" % (max_jobs, command_suffix),
joined_script_commands,
"PARALLEL_SCRIPT\n"])
def parallel(self, scripts, max_jobs=None, output_timeout=20 * 60):
base_dir = tempfile.mktemp(dir='/tmp/')
parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs)
command = "\n".join(["mkdir %s" % base_dir, parallel_cmd])
try:
self.execute(command, output_timeout)
return parallel.Parallel(self, scripts, base_dir)
except Exception as e:
e.args += ('When running bash script "%s"' % command),
raise
def background_parallel(self, scripts, max_jobs=None):
base_dir = tempfile.mktemp(dir='/tmp/')
pid_filename = os.path.join(base_dir, "parallel.pid")
out_filename = os.path.join(base_dir, "parallel.out")
err_filename = os.path.join(base_dir, "parallel.err")
status_filename = os.path.join(base_dir, "parallel.status")
parallel_cmd = self._parallel_commands(base_dir, scripts, max_jobs, " 1>%s 2>%s &" % (out_filename, err_filename))
command = "\n".join(["mkdir %s" % base_dir,
"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &",
"(%(bash_script)s echo $!>%(pid)s;wait $!);echo $?>%(status)s" % dict(
bash_script=parallel_cmd,
out=out_filename,
err=err_filename,
status=status_filename,
pid=pid_filename),
"RACKATTACK_SSH_RUN_SCRIPT_EOF\n"])
try:
self._exec(command)
return parallel.BackgroundParallel(self, scripts, base_dir, pid_filename, out_filename, err_filename, status_filename)
except CalledProcessError as e:
raise Exception("Failed running '%s', status '%s'" % (scripts, e.returncode))
| 1.96875 | 2 |
config.py | jonas-scholz123/polarisation | 0 | 12792182 |
DATA_PATH = "/media/jonas/Extreme SSD/research/polarisation/data/2019_12/"
ADJACENCY_MATRIX_PATH = "./intermediate_data/network_matrices/"
ID_DICT_PATH = "./intermediate_data/id_to_sub_dicts/"
EDGE_LIST_PATH = "./intermediate_data/edge_lists/"
COUNT_COL_NAME = "f0_"
MIN_COUNT_BOT_EXCLUSION = 500
SUBREDDIT_COMMENT_THRESHOLD = 500
REBUILD = False
SAVE = True
SAMPLE_DF = False
MONTH = "2019_12" | 1.148438 | 1 |
tests/test_copy_samples.py | EdinburghGenomics/clarity_scripts | 2 | 12792183 | from builtins import sorted
from itertools import cycle
from unittest.mock import patch, Mock
from pyclarity_lims.entities import Sample
from scripts.copy_samples import Container
from scripts.copy_samples import CopySamples
from tests.test_common import TestEPP, FakeEntitiesMaker
class TestCopySamples(TestEPP):
mocked_step = Mock(details=Mock(udf={}), actions=Mock(next_actions=[{}]))
patched_get_workflow_stage = patch('scripts.copy_samples.get_workflow_stage', return_value=Mock(uri='a_uri',
step=mocked_step))
patched_create_batch = patch('lims.copy_samples.create_batch', return_value=True)
@staticmethod
def get_patch_create_container(container):
return patch.object(Container, 'create', return_value=container)
def setUp(self):
self.epp = CopySamples(self.default_argv)
self.fem_params = {
'nb_input': 2,
'project_name': 'X99999',
'process_id': '99-9999',
'input_container_name': 'X99999P001',
'sample_name': cycle(['X99999P001A01',
'X99999P001B01']),
'sample_udfs': {
'Prep Workflow': cycle(['TruSeq Nano DNA Sample Prep', 'TruSeq PCR-Free DNA Sample Prep']),
'Coverage (X)': cycle([30, 60]),
'Required Yield (Gb)': cycle([120, 240]),
'Delivery': cycle(['merged', 'split']),
'Analysis Type': cycle(['Variant Calling gatk', 'None']),
'Rapid Analysis': cycle(['No', 'Yes']),
'User Prepared Library': cycle(['No', 'Yes']),
'Species': cycle(['Homo sapiens', 'Mus musculus']),
'Genome Version': cycle(['hg38', 'hg19']),
},
'step_udfs': {'Container Type': '96 well plate'},
'output_per_input': 0,
'process_id': '99-9999'
}
def test_copy_samples(self):
fem = FakeEntitiesMaker()
self.epp.lims = fem.lims
self.epp.process = fem.create_a_fake_process(**self.fem_params)
self.epp.lims.get_containers = Mock(return_value=[])
self.workflow_stage = Mock(uri='a_uri')
self.patch_Step_create = patch('scripts.copy_samples.Step.create', return_value=self.mocked_step)
with self.get_patch_create_container(fem.create_a_fake_container(container_name='X99999P002')), \
self.patched_get_workflow_stage as pws, self.patch_Step_create as psc:
self.epp._run()
expected_create_samples_list = [{
'container': fem.object_store_per_type['Container'][1],
'project': fem.object_store_per_type['Project'][0],
'name': 'X99999P002A01', 'position': 'A:1',
'udf': {'Prep Workflow': 'TruSeq Nano DNA Sample Prep',
'Coverage (X)': 30,
'Required Yield (Gb)': 120,
'Delivery': 'merged',
'User Prepared Library': 'No',
'Analysis Type': 'Variant Calling gatk',
'Rapid Analysis': 'No',
'Species': 'Homo sapiens',
'Genome Version': 'hg38',
}},
{
'container': fem.object_store_per_type['Container'][1],
'project': fem.object_store_per_type['Project'][0],
'name': 'X99999P002B01',
'position': 'B:1',
'udf': {'Prep Workflow': 'TruSeq PCR-Free DNA Sample Prep',
'Coverage (X)': 60,
'Required Yield (Gb)': 240,
'Delivery': 'split',
'Analysis Type': 'None',
'User Prepared Library': 'Yes',
'Rapid Analysis': 'Yes',
'Species': 'Mus musculus',
'Genome Version': 'hg19',
}},
]
self.epp.lims.create_batch.assert_called_once_with(Sample, expected_create_samples_list)
pws.assert_any_call(self.epp.lims, 'PreSeqLab EG2.1 WF', 'Create Manifest EG 1.0 ST')
pws.assert_any_call(self.epp.lims, "Remove From Processing EG 1.0 WF", "Remove From Processing EG 1.0 ST")
# test step creation
inputs_project_step_creation = []
inputs_project_step_creation_dict = {
self.epp.artifacts[0].samples[0].artifact.name: self.epp.artifacts[0].samples[0].artifact,
self.epp.artifacts[1].samples[0].artifact.name: self.epp.artifacts[1].samples[0].artifact}
for input in sorted(inputs_project_step_creation_dict):
inputs_project_step_creation.append(inputs_project_step_creation_dict[input])
psc.assert_called_with(
self.epp.lims,
inputs=inputs_project_step_creation,
protocol_step=self.mocked_step,
container_type_name='Tube'
)
| 2.03125 | 2 |
conversion.py | casperwang/autovc | 2 | 12792184 |
#!/usr/bin/env python
# coding: utf-8
#from resemblyzer import preprocess_wav, VoiceEncoder #Style encoder
import os
import pickle
import torch
import numpy as np
import data_loader.dataLoader as datas
from math import ceil
from model_vc import Generator
device = 'cpu'
G = Generator(32,256,512,32).eval().to(device)
g_checkpoint = torch.load('train_weights.ckpt', map_location = torch.device('cpu')) #AutoVC model weights
G.load_state_dict(g_checkpoint['model'])
data = datas.voiceDataset()
metadata = [data[0]]
spect_vc = []
for sbmt_i in metadata:
x_org = sbmt_i['spectrogram']
uttr_org = x_org
emb_org = sbmt_i['style'][np.newaxis, :]
for sbmt_j in metadata:
emb_trg = sbmt_j["style"][np.newaxis, :]
tmp = np.zeros((256), dtype='float64')
tmp[0] = 1
with torch.no_grad():
_, x_identic_psnt, _ = G(uttr_org, torch.from_numpy(tmp).cpu().float()[np.newaxis, :], emb_trg)
uttr_trg = x_identic_psnt[0, 0, :, :].cpu().numpy()
spect_vc.append( ('{}x{}'.format(sbmt_i["person"], sbmt_j["person"]), uttr_trg) )
with open('results.pkl', 'wb') as handle:
pickle.dump(spect_vc, handle)
| 2 | 2 |
samples/miyukiCamera/sequentialDetecter_1.py | scrambleegg7/Mask_RCNN | 0 | 12792185 | #
# Object detector (by sequential file read from directory)
#
import os
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from glob import glob
import argparse
import skimage
import shutil
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
#from samples.cats_dogs import cats_dogs
from samples.miyukiCamera import miyukiCamera
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
def process():
class InferenceConfig(miyukiCamera.MiyukiCameraConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
#config.display()
# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
DEVICE = "/gpu:0" # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
# set model
# Create model in inference mode
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=MODEL_DIR)
# Or, load the last model you trained
weights_path = model.find_last()
# Load weights
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
return model, config
def detector(model,config, dataset, DATA_DIR):
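    # Run inference on every .jpg in DATA_DIR and copy images containing a "prescription" detection into the sibling mrcnn_image folder.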
MRCNN_DATA_DIR = "/".join( DATA_DIR.split('/')[:-1] )
MRCNN_DATA_DIR = os.path.join( MRCNN_DATA_DIR, "mrcnn_image")
print(MRCNN_DATA_DIR)
images = glob( os.path.join(DATA_DIR, "*.jpg") )
print("* total length of images : ", len(images) )
for f in images:
print("Running on {}".format(f))
# Read image
image = skimage.io.imread(f)
# Detect objects
results = model.detect([image], verbose=1)
r = results[0]
print("- " * 40 )
print("Scores --> ", r['scores'])
print("found Class Names --> ", [dataset.class_info[i]["name"] for i in r['class_ids']] )
classes = [dataset.class_info[i]["name"] for i in r['class_ids']]
if "prescription" in classes:
print("found prescription on %s" % f.split("/")[-1])
image_file = f.split("/")[-1]
shutil.copy( f, os.path.join( MRCNN_DATA_DIR, image_file ) )
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Sequential Reading File Object Detector.')
parser.add_argument('--dataset', required=True,
metavar="/path/to/balloon/dataset",
help='Directory of the target dataset to detect')
args = parser.parse_args()
assert args.dataset ,\
"Provide --image directory to apply detector"
model, config = process()
dataset = miyukiCamera.MiyukiCameraDataset()
DATA_DIR = args.dataset
detector(model, config, dataset, DATA_DIR)
if __name__ == "__main__":
main() | 2.46875 | 2 |
meraki_sdk/models/device_policy_enum.py | meraki/meraki-python-sdk | 37 | 12792186 | # -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class DevicePolicyEnum(object):
"""Implementation of the 'DevicePolicy' enum.
The policy to apply to the specified client. Can be 'Whitelisted',
'Blocked', 'Normal' or 'Group policy'. Required.
Attributes:
WHITELISTED: TODO: type description here.
BLOCKED: TODO: type description here.
NORMAL: TODO: type description here.
        ENUM_GROUP_POLICY: TODO: type description here.
"""
WHITELISTED = 'Whitelisted'
BLOCKED = 'Blocked'
NORMAL = 'Normal'
ENUM_GROUP_POLICY = 'Group policy'
| 1.5 | 2 |
app/djangoci/tests/test_hello_world.py | Dierme/django-circleci | 0 | 12792187 | from django.test import TestCase
from django.test import Client
class HelloWorldTestCase(TestCase):
"""Hello world tests"""
def test_successful_case(self):
"""Successful test case"""
self.assertTrue(True)
def test_http_request(self):
client = Client()
response = client.get("/")
self.assertEqual(200, response.status_code)
self.assertEqual(response.content.decode(), "Hello world from Django! You're at the index. Bingo")
| 2.703125 | 3 |
modelmapper/base.py | wearefair/modelmapper | 5 | 12792188 | import os
import sys
import logging
import importlib
from ast import literal_eval
from copy import deepcopy
from collections import defaultdict
from collections import namedtuple, Counter
from modelmapper.misc import read_csv_gen, load_toml, camel_to_snake
from modelmapper.slack import slack
OVERRIDES_FILE_NAME = "{}_overrides.toml"
COMBINED_FILE_NAME = "{}_combined.py"
class Base:
logger = logging.getLogger(__name__)
SETUP_PATH = None
def __init__(self, setup_path=None, debug=False):
self.setup_path = setup_path or getattr(self, 'SETUP_PATH', None)
if self.setup_path is None:
raise ValueError('setup_path needs to be passed to init or SETUP_PATH needs to be a class attribute.')
if not self.setup_path.endswith('_setup.toml'):
raise ValueError('The path needs to end with _setup.toml')
self.debug = debug
self.setup_dir = os.path.dirname(self.setup_path)
sys.path.append(self.setup_dir)
clean_later = ['field_name_full_conversion', 'ignore_fields_in_signature_calculation',
'identify_header_by_column_names', 'fields_to_be_encrypted', 'fields_to_be_scrubbed']
convert_to_set = ['null_values', 'boolean_true', 'boolean_false', 'datetime_formats',
'ignore_lines_that_include_only_subset_of',
'ignore_fields_in_signature_calculation', 'identify_header_by_column_names']
self._original_settings = load_toml(self.setup_path)['settings']
self.settings = deepcopy(self._original_settings)
for item in clean_later:
self._clean_settings_items(item)
for item in convert_to_set:
self.settings[item] = set(self.settings.get(item, []))
key = 'default_value_for_field_when_casting_error'
self.settings[key] = self.settings.get(key) or r'{}'
self.settings[key] = {self._clean_it(i): v for i, v in literal_eval(self.settings[key]).items()}
slack_http_endpoint = self.settings['slack_http_endpoint']
# attempt to get passed in value from ENV VAR, defaulting to passed in value if not present
slack_http_endpoint = os.environ.get(slack_http_endpoint, slack_http_endpoint)
self.settings['should_reprocess'] = self.settings.get('should_reprocess', False)
self.settings['slack_http_endpoint'] = slack_http_endpoint
self.settings['identifier'] = identifier = os.path.basename(self.setup_path).replace('_setup.toml', '')
self.settings['overrides_file_name'] = OVERRIDES_FILE_NAME.format(identifier)
self.settings['combined_file_name'] = COMBINED_FILE_NAME.format(identifier)
self.settings['booleans'] = self.settings['boolean_true'] | self.settings['boolean_false']
self.settings['datetime_allowed_characters'] = set(self.settings['datetime_allowed_characters'])
for i, v in (('overrides_path', 'overrides_file_name'),
('combined_path', 'combined_file_name'),
('output_model_path', 'output_model_file')):
self.settings[i] = os.path.join(self.setup_dir, self.settings[v])
# Since we cleaning up the field_name_part_conversion, special characters
# such as \n need to be added seperately.
# self.settings['field_name_part_conversion'].insert(0, ['\n', '_']).insert(0, ['\r\n', '_'])
_max_int = ((i, int(v)) for i, v in self.settings['max_int'].items())
self.settings['max_int'] = dict(sorted(_max_int, key=lambda x: x[1]))
Settings = namedtuple('Settings', ' '.join(self.settings.keys()))
self.settings = Settings(**self.settings)
self.questionable_fields = {}
self.solid_decisions = {}
self.failed_to_infer_fields = set()
self.empty_fields = set()
def _clean_it(self, item):
conv = (self.settings['field_name_part_conversion'] if isinstance(self.settings, dict)
else self.settings.field_name_part_conversion)
item = item.replace('\r\n', '_').replace('\n', '_')
item = camel_to_snake(item)
for source, to_replace in conv:
item = item.replace(source, to_replace)
return item.strip('_')
def _clean_settings_items(self, item):
"""
Normalizes list or nested lists
"""
if item not in self.settings:
self.settings[item] = []
try:
first_value = self.settings[item][0]
except IndexError:
pass
else:
if isinstance(first_value, list):
self.settings[item] = [[self._clean_it(i), self._clean_it(j)] for i, j in self.settings[item]]
else:
self.settings[item] = list(map(self._clean_it, self.settings[item]))
def _get_clean_field_name(self, name):
item = self._clean_it(name)
for source, to_replace in self.settings.field_name_full_conversion:
if item == source:
item = to_replace
break
return item
def _get_all_clean_field_names_mapping(self, names):
name_mapping = {}
for name in names:
name_mapping[name] = self._get_clean_field_name(name)
return name_mapping
def _get_combined_module(self):
combined_module_str = self.settings.combined_file_name[:-3]
return importlib.import_module(combined_module_str)
def _verify_no_duplicate_clean_names(self, names_mapping):
clean_names_mapping = {}
for name, clean_name in names_mapping.items():
if clean_name in clean_names_mapping:
raise ValueError(f"'{name}' field has a collision with '{clean_names_mapping[clean_name]}'. "
f"They both produce '{clean_name}'")
else:
clean_names_mapping[clean_name] = name
def _does_line_include_data(self, line):
# whether line has any characters in it that are not in ignore_lines_that_include_only_subset_of
return any(filter(lambda x: set(x.strip()) - self.settings.ignore_lines_that_include_only_subset_of, line))
def _verify_no_duplicate_names(self, names):
counter = Counter(names)
duplicates = {i: v for i, v in counter.most_common(10) if v > 1}
if duplicates:
raise ValueError(f'The following fields were repeated in the csv: {duplicates}')
def _get_clean_names_and_csv_data_gen(self, path):
reader = read_csv_gen(path,
identify_header_by_column_names=self.settings.identify_header_by_column_names,
cleaning_func=self._clean_it)
names = next(reader)
self._verify_no_duplicate_names(names)
name_mapping = self._get_all_clean_field_names_mapping(names)
self._verify_no_duplicate_clean_names(name_mapping)
clean_names = list(name_mapping.values())
return clean_names, reader
def _get_all_values_per_clean_name(self, path):
result = defaultdict(list)
clean_names, reader = self._get_clean_names_and_csv_data_gen(path)
# transposing csv and turning into dictionary
for line in reader:
if self._does_line_include_data(line):
for i, v in enumerate(line):
try:
field_name = clean_names[i]
except IndexError:
raise ValueError("Your data might have new lines in the field names. "
"Please fix that and try again.")
else:
if field_name not in self.settings.fields_to_be_scrubbed:
result[field_name].append(v)
return result
def slack(self, text):
if self.settings.slack_username and \
self.settings.slack_channel and \
self.settings.slack_http_endpoint:
return slack(
text,
username=self.settings.slack_username,
channel=self.settings.slack_channel,
slack_http_endpoint=self.settings.slack_http_endpoint
)
| 2.015625 | 2 |
ddos.py | black-software-Com/Black-Attacker | 1 | 12792189 |
#!/usr/bin/python3
# Please read the code. Do not use ctrl + c and ctrl + v (~ ̄▽ ̄)~
import os,sys,socket,threading
try:
from colorama import Fore,init
init()
except ImportError:
os.system("pip install colorama")
end = '\033[0m'
def main():
print()
if sys.argv[1] == sys.argv[1]:
host = sys.argv[1]
port = int(sys.argv[2])
def dos(d):
while True:
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect_ex((host,port))
print(f"{Fore.RED}Packet Send To {Fore.GREEN}{host}{end}")
for i in range(10):
t = threading.Thread(target=dos,args=[i])
t.start()
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt,EOFError):
print("Stop...")
sys.exit()
| 2.484375 | 2 |
devices/server.py | marcelo-h-h/acoes_broker | 1 | 12792190 |
import zmq
import random
import time
class Server():
"""
    The server is the publisher: it has a stock associated with it and constantly generates a new
    variation of the stock price, publishing it to the forwarder and, consequently, to the subscribers.
"""
def __init__(self, front_port: int, stock: str, stock_value: float):
self._stock = stock
self._value = stock_value
port = str(front_port)
self._context = zmq.Context()
self._socket = self._context.socket(zmq.PUB) # Defines its sockets as a pub type socket
self._socket.connect("tcp://localhost:%s" % port) # Bind the socket to the frontend port of the forwarder
def run(self):
try:
while True:
negative = -1 if random.random()>=0.5 else 1 # Decides whether the variation will be positive or negative
variation = random.random() * 0.01 * negative # Decides the variation value
self._value += self._value * variation # Sets up the new value
messagedata = "Stock:%s" % self._stock + "_Value:%f" %self._value + "_Variation:%f" %(variation*100) +"%" # Mount up the message about the stock
self._socket.send_string("%s %s" % (self._stock, messagedata)) # Sends the message through the socket
time.sleep(1)
        except KeyboardInterrupt:
            print('Shutting down server')
finally:
self._socket.close()
self._context.term()
def start_new_server(front_port: int, stock: str, stock_value: float):
s = Server(front_port, stock, stock_value)
s.run()
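# Minimal manual-run sketch: start a publisher for a single stock. The port,
# ticker and starting price below are illustrative assumptions; the real values
# come from whatever launches these devices in the broker setup.
if __name__ == "__main__":
    start_new_server(front_port=5559, stock="ACME", stock_value=100.0)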
| 3.28125 | 3 |
spell-checking/predict_zemberek.py | mukayese-nlp/mukayese-baselines | 21 | 12792191
from zemberek import (
TurkishSpellChecker,
TurkishMorphology,
)
import sys
import json
import pandas as pd
from tqdm import tqdm
morphology = TurkishMorphology.create_with_defaults()
sc = TurkishSpellChecker(morphology)
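# Usage sketch (file names are illustrative): the first CLI argument is a CSV with
# "input" and "gold" columns, the second is the JSONL output path, e.g.
#   python predict_zemberek.py misspellings.csv zemberek_predictions.jsonl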
if __name__ == '__main__':
df = pd.read_csv(sys.argv[1])
spellings = {}
for w, g in tqdm(zip(df['input'], df['gold'])):
sugg = sc.suggest_for_word(w)
spellings[w] = { 'input': w, 'gold': str(g), 'spelling': int(w not in sugg), 'suggestions': sugg }
with open(sys.argv[2], 'w') as f:
for w in spellings:
f.write(json.dumps(spellings[w], ensure_ascii=False) + '\n')
| 2.609375 | 3 |
custom/inddex/food.py | dimagilg/commcare-hq | 471 | 12792192 | """
This file contains the logic to generate the master dataset for the INDDEX reports
Overview
--------
Beneficiaries are asked about their diet in a "recall" session. This results in
a "foodrecall" case. Every food they mention results in the creation of a "food"
case that's a child of this foodrecall.
This dataset has a row for every food, with metadata about the recall session,
calculated nutritional information, and auditing columns reporting on what data
is or isn't available. Some of these foods are recipes, and their ingredients
appear as separate rows in the report.
Standard recipes have their ingredients enumerated in the "recipes" lookup
table. This dataset has additional rows inserted for each ingredient. These
rows are associated with the recipe case, but don't have a case of their own.
Nonstandard recipes are defined by the user and beneficiary during a recall
session. The ingredients of the recipe are entered as additional food cases and
linked to the recipe by `recipe_case_id`.
Beneficiaries may report eating a nonstandard recipe more than once, in which
case subsequent references point to the recipe definition with
already_reported_recipe_case_id and don't enumerate the ingredients again. We
need to insert duplicates of the previously reported ingredients into the
report for them.
Components
----------
FoodData :: This is the interface to this dataset, it glues together all the
component pieces and presents the result as a unified dataset.
FoodRow :: Class responsible for row-wise calculations and indicator definitions.
"""
import operator
import uuid
from collections import defaultdict
from functools import reduce
from memoized import memoized
from corehq.apps.es import users as user_es
from corehq.apps.reports.filters.case_list import CaseListFilter as EMWF
from corehq.apps.reports.standard.cases.utils import get_case_owners
from custom.inddex.ucr_data import FoodCaseData
from .const import (
AGE_RANGES,
FOOD_ITEM,
NON_STANDARD_RECIPE,
STANDARD_RECIPE,
ConvFactorGaps,
FctGaps,
)
from .fixtures import FixtureAccessor
IN_UCR = 'in_ucr'
IN_FOOD_FIXTURE = 'in_food_fixture'
IS_RECALL_META = 'is_recall_meta'
CALCULATED_LATER = 'calculated_later'
class I:
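    """A report indicator: a column slug plus tags recording where its value comes
    from (the UCR, the food fixture, recall metadata) or that it is calculated later."""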
def __init__(self, slug, *tags):
self.slug = slug
tags = set(tags)
self.in_ucr = IN_UCR in tags
self.in_food_fixture = IN_FOOD_FIXTURE in tags
self.is_recall_meta = IS_RECALL_META in tags
self.is_calculated_later = CALCULATED_LATER in tags
# Indicator descriptions can be found here:
# https://docs.google.com/spreadsheets/d/1znPjfQSFEUFP_R_G8VYE-Bd5dg72k5sP-hZPuy-3RZo/edit
INDICATORS = [
I('unique_respondent_id', IN_UCR, IS_RECALL_META),
I('location_id', IN_UCR, IS_RECALL_META),
I('respondent_id', IN_UCR, IS_RECALL_META),
I('recall_case_id', IN_UCR, IS_RECALL_META),
I('opened_by_username', IN_UCR, IS_RECALL_META),
I('owner_name', IN_UCR, IS_RECALL_META),
I('visit_date', IN_UCR, IS_RECALL_META),
I('opened_on', IN_UCR, IS_RECALL_META),
I('recall_status', IN_UCR, IS_RECALL_META),
I('gender', IN_UCR, IS_RECALL_META),
I('age_years_calculated', IN_UCR, IS_RECALL_META),
I('age_months_calculated', IN_UCR, IS_RECALL_META),
I('age_range', IS_RECALL_META),
I('pregnant', IN_UCR, IS_RECALL_META),
I('breastfeeding', IN_UCR, IS_RECALL_META),
I('urban_rural', IN_UCR, IS_RECALL_META),
I('supplements', IN_UCR, IS_RECALL_META),
I('food_code', IN_UCR),
I('food_name', IN_UCR, IN_FOOD_FIXTURE),
I('recipe_name', IN_UCR, CALCULATED_LATER),
I('caseid'),
I('food_type', IN_UCR, IN_FOOD_FIXTURE),
I('food_status', IN_UCR, IS_RECALL_META),
I('reference_food_code'),
I('base_term_food_code', IN_UCR),
I('include_in_analysis'),
I('fao_who_gift_food_group_code'),
I('fao_who_gift_food_group_description'),
I('user_food_group'),
I('eating_time', IN_UCR, IS_RECALL_META),
I('time_block', IN_UCR, IS_RECALL_META),
I('already_reported_food', IN_UCR),
I('already_reported_food_case_id', IN_UCR),
I('already_reported_recipe', IN_UCR),
I('already_reported_recipe_case_id', IN_UCR),
I('already_reported_recipe_name', IN_UCR),
I('is_ingredient', IN_UCR),
I('ingredient_type', CALCULATED_LATER),
I('recipe_case_id', IN_UCR),
I('ingr_recipe_code'),
I('ingr_fraction'),
I('ingr_recipe_total_grams_consumed', CALCULATED_LATER),
I('short_name', IN_UCR),
I('food_base_term', IN_UCR, IN_FOOD_FIXTURE),
I('tag_1', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_1', IN_UCR),
I('tag_2', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_2', IN_UCR),
I('tag_3', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_3', IN_UCR),
I('tag_4', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_4', IN_UCR),
I('tag_5', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_5', IN_UCR),
I('tag_6', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_6', IN_UCR),
I('tag_7', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_7', IN_UCR),
I('tag_8', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_8', IN_UCR),
I('tag_9', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_9', IN_UCR),
I('tag_10', IN_UCR, IN_FOOD_FIXTURE),
I('other_tag_10', IN_UCR),
I('conv_method_code', IN_UCR),
I('conv_method_desc', IN_UCR),
I('conv_option_code', IN_UCR),
I('conv_option_desc', IN_UCR),
I('measurement_amount', IN_UCR),
I('conv_units', IN_UCR),
I('portions', IN_UCR),
I('nsr_conv_method_code_post_cooking', IN_UCR),
I('nsr_conv_method_desc_post_cooking', IN_UCR),
I('nsr_conv_option_code_post_cooking', IN_UCR),
I('nsr_conv_option_desc_post_cooking', IN_UCR),
I('nsr_measurement_amount_post_cooking', IN_UCR),
I('nsr_consumed_cooked_fraction', IN_UCR),
I('recipe_num_ingredients', CALCULATED_LATER),
I('conv_factor_food_code'),
I('conv_factor_base_term_food_code'),
I('conv_factor_used'),
I('conv_factor'),
I('fct_food_code_exists'),
I('fct_base_term_food_code_exists'),
I('fct_reference_food_code_exists'),
I('fct_data_used'),
I('fct_code'),
I('total_grams', CALCULATED_LATER),
I('conv_factor_gap_code'),
I('conv_factor_gap_desc'),
I('fct_gap_code', CALCULATED_LATER),
I('fct_gap_desc', CALCULATED_LATER),
]
_INDICATORS_BY_SLUG = {i.slug: i for i in INDICATORS}
NSR_COLS_TO_COPY = [
'nsr_conv_method_code_post_cooking',
'nsr_conv_method_desc_post_cooking',
'nsr_conv_option_code_post_cooking',
'nsr_conv_option_desc_post_cooking',
'nsr_measurement_amount_post_cooking',
'nsr_consumed_cooked_fraction',
]
class FoodRow:
def __init__(self, ucr_row, fixtures, ingredient=None):
self.uuid = uuid.uuid4()
self.ucr_row = ucr_row
self.fixtures = fixtures
self._is_std_recipe_ingredient = bool(ingredient)
if self._is_std_recipe_ingredient:
self.food_code = ingredient.ingr_code
self._set_ingredient_fields(ingredient)
else:
self.caseid = ucr_row['doc_id']
self.food_code = ucr_row['food_code']
if not self.food_code and self.food_name in self.fixtures.foods_by_name:
self.food_code = self.fixtures.foods_by_name[self.food_name].food_code
if not self.base_term_food_code and self.food_base_term in self.fixtures.foods_by_name:
self.base_term_food_code = self.fixtures.foods_by_name[self.food_base_term].food_code
self._set_composition()
self._set_conversion_factors()
self.is_recipe = self.food_type in (STANDARD_RECIPE, NON_STANDARD_RECIPE)
self.include_in_analysis = not self.is_recipe
self.measurement_amount = _maybe_float(self.measurement_amount)
self.portions = _maybe_float(self.portions)
self.nsr_consumed_cooked_fraction = _maybe_float(self.nsr_consumed_cooked_fraction)
self.enrichment_complete = False
def _set_ingredient_fields(self, ingredient):
if self._is_std_recipe_ingredient:
self.is_ingredient = 'yes'
self.ingr_recipe_code = ingredient.recipe_code
self.ingr_fraction = ingredient.ingr_fraction
def _set_composition(self):
# Get the food composition corresponding to food_code, fall back to base_term_food_code
fct = self.fixtures.food_compositions
self.fct_food_code_exists = bool(self.food_code and self.food_code in fct)
self.fct_base_term_food_code_exists = bool(self.base_term_food_code and self.base_term_food_code in fct)
self.fct_code = None
if self.fct_food_code_exists:
self.fct_code = self.food_code
self.fct_data_used = 'food_code'
elif self.fct_base_term_food_code_exists:
self.fct_code = self.base_term_food_code
self.fct_data_used = 'base_term_food_code'
if self.fct_code:
self.composition = fct[self.fct_code]
self.fao_who_gift_food_group_code = self.composition.fao_who_gift_food_group_code
self.fao_who_gift_food_group_description = self.composition.fao_who_gift_food_group_description
self.user_food_group = self.composition.user_defined_food_group
self.reference_food_code = self.composition.reference_food_code_for_food_composition
if self.fct_data_used == 'food_code' and self.reference_food_code:
self.fct_data_used = 'reference_food_code'
self.fct_reference_food_code_exists = bool(self.reference_food_code)
def set_fct_gap(self, ingredients=None):
if ingredients:
for row in ingredients:
row.set_fct_gap()
self.fct_gap_code = FctGaps.NOT_AVAILABLE
if self.food_type == FOOD_ITEM and self.fct_code:
self.fct_gap_code = {
'food_code': FctGaps.AVAILABLE,
'base_term_food_code': FctGaps.BASE_TERM,
'reference_food_code': FctGaps.REFERENCE,
}[self.fct_data_used]
if self.is_recipe and ingredients:
if all(i.fct_gap_code == FctGaps.AVAILABLE for i in ingredients):
self.fct_gap_code = FctGaps.AVAILABLE
else:
self.fct_gap_code = FctGaps.INGREDIENT_GAPS
self.fct_gap_desc = FctGaps.DESCRIPTIONS[self.fct_gap_code]
def _set_conversion_factors(self):
self.conv_factor_gap_code = ConvFactorGaps.NOT_AVAILABLE
if (self.food_type == FOOD_ITEM and self._is_std_recipe_ingredient
or self.food_type == NON_STANDARD_RECIPE):
self.conv_factor_gap_code = ConvFactorGaps.NOT_APPLICABLE
elif self.food_type in (FOOD_ITEM, STANDARD_RECIPE) and self.conv_method_code:
self.conv_factor_food_code = self.fixtures.conversion_factors.get(
(self.food_code, self.conv_method_code, self.conv_option_code))
self.conv_factor_base_term_food_code = self.fixtures.conversion_factors.get(
(self.base_term_food_code, self.conv_method_code, self.conv_option_code))
if self.conv_factor_food_code:
self.conv_factor_used = 'food_code'
self.conv_factor = self.conv_factor_food_code
self.conv_factor_gap_code = ConvFactorGaps.AVAILABLE
elif self.conv_factor_base_term_food_code:
self.conv_factor_used = 'base_term_food_code'
self.conv_factor = self.conv_factor_base_term_food_code
self.conv_factor_gap_code = ConvFactorGaps.BASE_TERM
self.conv_factor_gap_desc = ConvFactorGaps.DESCRIPTIONS[self.conv_factor_gap_code]
@property
def age_range(self):
if not self.age_months_calculated:
return None
for age_range in AGE_RANGES:
if age_range.lower_bound <= getattr(self, age_range.column) < age_range.upper_bound:
return age_range.name
def get_nutrient_per_100g(self, nutrient_name):
if self.fct_code:
return self.composition.nutrients.get(nutrient_name)
def get_nutrient_amt(self, nutrient_name):
return _multiply(self.get_nutrient_per_100g(nutrient_name), self.total_grams, 0.01)
def __getattr__(self, name):
if name in _INDICATORS_BY_SLUG:
indicator = _INDICATORS_BY_SLUG[name]
if indicator.is_calculated_later:
if not self.enrichment_complete:
raise AttributeError(f"{name} hasn't yet been set. It will be "
"calculated outside the scope of FoodRow.")
return None
if self._is_std_recipe_ingredient:
# If it's an indicator that hasn't been explicitly set, check if it can
# be pulled from the food fixture or from the parent food case's UCR
if indicator.in_food_fixture:
return getattr(self.fixtures.foods[self.food_code], indicator.slug)
if indicator.is_recall_meta:
return self.ucr_row[indicator.slug]
return None
else:
# If it's an indicator in the UCR that hasn't been explicitly set, return that val
return self.ucr_row[indicator.slug] if indicator.in_ucr else None
raise AttributeError(f"FoodRow has no definition for {name}")
class FoodData:
"""Generates the primary dataset for INDDEX reports. See file docstring for more."""
IN_MEMORY_FILTERS = ['gap_type', 'fao_who_gift_food_group_code', 'food_type']
FILTERABLE_COLUMNS = IN_MEMORY_FILTERS + FoodCaseData.FILTERABLE_COLUMNS
def __init__(self, domain, *, datespan, filter_selections):
for slug in filter_selections:
if slug not in self.FILTERABLE_COLUMNS:
raise AssertionError(f"{slug} is not a valid filter slug")
self.fixtures = FixtureAccessor(domain)
self._in_memory_filter_selections = {
slug: filter_selections[slug] for slug in self.IN_MEMORY_FILTERS
if slug in filter_selections
}
self._ucr = FoodCaseData({
'domain': domain,
'startdate': str(datespan.startdate),
'enddate': str(datespan.enddate),
**{k: v for k, v in filter_selections.items()
if k in FoodCaseData.FILTERABLE_COLUMNS}
})
@classmethod
def from_request(cls, domain, request):
return cls(
domain,
datespan=request.datespan,
filter_selections={'owner_id': cls._get_owner_ids(domain, request),
**{k: [v for v in request.GET.getlist(k) if v]
for k in cls.FILTERABLE_COLUMNS if k != 'owner_id'}}
)
@staticmethod
def _get_owner_ids(domain, request):
slugs = request.GET.getlist(EMWF.slug)
if EMWF.no_filters_selected(slugs) or EMWF.show_all_data(slugs) or EMWF.show_project_data(slugs):
return [] # don't filter by owner
if EMWF.show_deactivated_data(slugs):
return (user_es.UserES()
.show_only_inactive()
.domain(domain)
.get_ids())
return get_case_owners(request, domain, slugs)
def _matches_in_memory_filters(self, row):
# If a gap type is specified, show only rows with gaps of that type
gap_type = self._in_memory_filter_selections.get('gap_type')
if gap_type == ConvFactorGaps.slug and row.conv_factor_gap_code == ConvFactorGaps.AVAILABLE:
return False
if gap_type == FctGaps.slug and row.fct_gap_code == FctGaps.AVAILABLE:
return False
food_types = self._in_memory_filter_selections.get('food_type')
if food_types and row.food_type not in food_types:
return False
food_groups = self._in_memory_filter_selections.get('fao_who_gift_food_group_code')
if food_groups and row.fao_who_gift_food_group_code not in food_groups:
return False
return True
def _get_grouped_rows(self):
"""Return raw case rows grouped by recipe"""
rows = defaultdict(lambda: {
'recipe': None,
'references': [],
'ingredients': [],
})
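        # Each group gathers the recipe's own row, any rows that merely reference an
        # already-reported recipe, and the food rows that act as its ingredients.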
for row in self._ucr.get_data():
if row['food_type'] in (STANDARD_RECIPE, NON_STANDARD_RECIPE):
if row['already_reported_recipe_case_id']:
rows[row['already_reported_recipe_case_id']]['references'].append(row)
else:
rows[row['doc_id']]['recipe'] = row
elif row['recipe_case_id']:
rows[row['recipe_case_id']]['ingredients'].append(row)
else:
# this isn't part of a recipe
rows[row['doc_id']]['ingredients'].append(row)
return rows.values()
def _get_all_rows(self):
for group in self._get_grouped_rows():
master_recipe = group['recipe']
references = group['references']
ingredients = group['ingredients']
if not master_recipe:
yield from self._non_recipe_rows(references + ingredients)
else:
yield from self._recipe_rows(master_recipe, ingredients)
for recipe in references:
recipe = _insert_nsr_cols(recipe, master_recipe)
yield from self._recipe_rows(recipe, ingredients)
@property
@memoized
def rows(self):
rows = []
for row in self._get_all_rows():
if self._matches_in_memory_filters(row):
rows.append(row)
return rows
def _non_recipe_rows(self, rows):
"""These rows aren't part of a recipe, or it wasn't found"""
for raw_row in rows:
row = FoodRow(raw_row, self.fixtures)
row.total_grams = _multiply(row.measurement_amount, row.conv_factor, row.portions)
row.set_fct_gap()
row.enrichment_complete = True
yield row
def _recipe_rows(self, raw_recipe, raw_ingredients):
recipe = FoodRow(raw_recipe, self.fixtures)
if recipe.food_type == STANDARD_RECIPE:
# std recipe ingredients come from the DB, NOT ingredient cases
ingredients = [FoodRow(raw_recipe, self.fixtures, ingredient_data)
for ingredient_data in self.fixtures.recipes[recipe.food_code]]
else: # NON_STANDARD_RECIPE
ingredients = [FoodRow(raw, self.fixtures) for raw in raw_ingredients]
total_grams = _calculate_total_grams(recipe, ingredients)
recipe.set_fct_gap(ingredients)
recipe.recipe_name = recipe.ucr_row['recipe_name']
for row in [recipe] + ingredients:
row.total_grams = total_grams[row.uuid]
row.recipe_num_ingredients = len(ingredients)
row.recipe_case_id = recipe.caseid
if row.is_ingredient == 'yes':
row.recipe_name = recipe.recipe_name
if recipe.food_type == STANDARD_RECIPE:
row.ingredient_type = 'std_recipe_ingredient'
row.ingr_recipe_total_grams_consumed = total_grams[recipe.uuid]
else:
row.ingredient_type = 'non_std_recipe_ingredient'
for col in NSR_COLS_TO_COPY: # Copy these values from the recipe case
setattr(row, col, getattr(recipe, col))
row.enrichment_complete = True
yield row
def _insert_nsr_cols(raw_recipe, master_recipe):
# nsr references are missing some values, insert them from the master recipe
nsr_cols = {col: master_recipe[col] for col in NSR_COLS_TO_COPY}
amount = _maybe_float(raw_recipe['measurement_amount'])
portions = _maybe_float(raw_recipe['portions'])
amount_post_cooking = _maybe_float(master_recipe['nsr_measurement_amount_post_cooking'])
if all(val is not None for val in [amount, portions, amount_post_cooking]):
nsr_cols['nsr_consumed_cooked_fraction'] = amount * portions / amount_post_cooking
else:
nsr_cols['nsr_consumed_cooked_fraction'] = None
return {**raw_recipe, **nsr_cols}
def _calculate_total_grams(recipe, ingredients):
if recipe.food_type == STANDARD_RECIPE:
res = {}
recipe_total = _multiply(recipe.measurement_amount, recipe.conv_factor, recipe.portions)
res[recipe.uuid] = recipe_total
for row in ingredients:
res[row.uuid] = _multiply(recipe_total, row.ingr_fraction)
return res
else: # NON_STANDARD_RECIPE
res = {}
for row in ingredients:
res[row.uuid] = _multiply(row.measurement_amount, row.conv_factor,
row.portions, recipe.nsr_consumed_cooked_fraction)
try:
res[recipe.uuid] = sum(res.values()) if res else None
except TypeError:
res[recipe.uuid] = None
return res
def _multiply(*args):
try:
return reduce(operator.mul, args)
except TypeError:
return None
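# _multiply propagates missing data: any None factor makes the whole product None
# instead of raising, e.g. (hypothetical values) _multiply(2, 3, 0.5) == 3.0 while
# _multiply(2, None) is None.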
def _maybe_float(val):
return float(val) if val not in (None, '') else None
| 2.046875 | 2 |
leprikon/utils.py | leprikon-cz/leprikon | 4 | 12792193 | import locale
import os
import re
import string
import unicodedata
import zlib
from datetime import date
from urllib.parse import parse_qs, urlencode
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.urls import reverse_lazy as reverse
from django.utils.encoding import iri_to_uri, smart_text
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from .conf import settings
MALE = "m"
FEMALE = "f"
def _get_localeconv():
"""
This function loads localeconv during module load.
    This is necessary because calling locale.setlocale later may be dangerous
    (it is not thread-safe in most implementations).
"""
original_locale_name = locale.setlocale(locale.LC_ALL)
locale_name = locale.locale_alias[settings.LANGUAGE_CODE].split(".")[0] + ".UTF-8"
locale.setlocale(locale.LC_ALL, str(locale_name))
lc = locale.localeconv()
locale.setlocale(locale.LC_ALL, original_locale_name)
return lc
localeconv = _get_localeconv()
# This function is inspired by python's standard locale.currency().
def currency(val, international=False):
"""Formats val according to the currency settings for current language."""
digits = settings.PRICE_DECIMAL_PLACES
# grouping
groups = []
s = str(abs(int(val)))
for interval in locale._grouping_intervals(localeconv["mon_grouping"]):
if not s:
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
s = smart_text(localeconv["mon_thousands_sep"]).join(groups)
# display fraction for non integer values
if digits and not isinstance(val, int):
s += smart_text(localeconv["mon_decimal_point"]) + "{{:.{}f}}".format(digits).format(val).split(".")[1]
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = "<" + s + ">"
smb = smart_text(localeconv[international and "int_curr_symbol" or "currency_symbol"])
precedes = localeconv[val < 0 and "n_cs_precedes" or "p_cs_precedes"]
separated = localeconv[val < 0 and "n_sep_by_space" or "p_sep_by_space"]
if precedes:
s = smb + (separated and " " or "") + s
else:
s = s + (separated and " " or "") + smb
sign_pos = localeconv[val < 0 and "n_sign_posn" or "p_sign_posn"]
sign = localeconv[val < 0 and "negative_sign" or "positive_sign"]
if sign_pos == 0:
s = "(" + s + ")"
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace("<", sign)
elif sign_pos == 4:
s = s.replace(">", sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace("<", "").replace(">", "").replace(" ", "\u00A0")
def amount_color(amount):
if amount > 0:
return settings.LEPRIKON_COLOR_POSITIVE
elif amount < 0:
return settings.LEPRIKON_COLOR_NEGATIVE
else:
return settings.LEPRIKON_COLOR_ZERO
def ascii(value):
return unicodedata.normalize("NFKD", value).encode("ascii", errors="ignore").decode("ascii")
def comma_separated(lst):
lst = list(map(smart_text, lst))
if len(lst) > 2:
return _(", and ").join([", ".join(lst[:-1]), lst[-1]])
else:
return _(", and ").join(lst)
def get_rand_hash(length=32, stringset=string.ascii_letters + string.digits):
return "".join([stringset[i % len(stringset)] for i in [ord(x) for x in os.urandom(length)]])
def current_url(request):
if request.META["QUERY_STRING"]:
return "{}?{}".format(request.path, request.META["QUERY_STRING"])
else:
return request.path
def url_back(request):
return request.POST.get(
settings.LEPRIKON_PARAM_BACK,
request.GET.get(
settings.LEPRIKON_PARAM_BACK,
reverse("leprikon:summary"),
),
)
recursive_back_splitter = re.compile(f"[?&]{settings.LEPRIKON_PARAM_BACK}=")
def url_with_back(url, url_back):
try:
query = url_back.split("?")[1]
except IndexError:
pass
else:
try:
# try to reuse original back url
url_back = parse_qs(query)[settings.LEPRIKON_PARAM_BACK][0]
except KeyError:
pass
# remove recursive back url
url_back = recursive_back_splitter.split(url_back)[0]
return "{}?{}".format(url, urlencode({settings.LEPRIKON_PARAM_BACK: iri_to_uri(url_back)}))
def reverse_with_back(request, *args, **kwargs):
return url_with_back(reverse(*args, **kwargs), current_url(request))
def get_gender(birth_num):
return FEMALE if birth_num[2:4] > "50" else MALE
def get_birth_date(birth_num):
birth_num = birth_num.replace("/", "")
y = int(birth_num[:2])
if len(birth_num) == 9:
# before 1954
if y < 54:
year = 1900 + y
else:
year = 1800 + y
else:
year = int(date.today().year / 100) * 100 + y
if y > date.today().year % 100:
year -= 100
month = int(birth_num[2:4]) % 50 % 20
day = int(birth_num[4:6])
return date(year, month, day)
def get_age(birth_date, today=None):
today = today or date.today()
try:
birth_day_this_year = date(today.year, birth_date.month, birth_date.day)
except ValueError:
birth_day_this_year = date(today.year, birth_date.month + 1, 1)
if birth_day_this_year > today:
return today.year - birth_date.year - 1
else:
return today.year - birth_date.year
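# e.g. (hypothetical dates): get_age(date(2000, 5, 1), today=date(2020, 4, 30)) == 19,
# while get_age(date(2000, 5, 1), today=date(2020, 5, 1)) == 20.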
def first_upper(s):
return s[0].upper() + s[1:] if s else ""
def merge_objects(source, target, attributes=None, exclude=()):
attributes = attributes or [f.name for f in source._meta.fields if f.name not in exclude]
for attr in attributes:
if not getattr(target, attr):
setattr(target, attr, getattr(source, attr))
return target
@transaction.atomic
def merge_users(source, target):
from .models.subjects import SubjectRegistration
target = merge_objects(source, target, ("first_name", "last_name", "email"))
target.date_joined = (
min(source.date_joined, target.date_joined)
if source.date_joined and target.date_joined
else source.date_joined or target.date_joined
)
target.last_login = (
max(source.last_login, target.last_login)
if source.last_login and target.last_login
else source.last_login or target.last_login
)
try:
leader = source.leprikon_leader
leader.user = target
leader.save()
except ObjectDoesNotExist:
pass
except IntegrityError:
# both users are leaders
raise
for attr in (
"user",
"created_by",
"approved_by",
"payment_requested_by",
"refund_offered_by",
"cancelation_requested_by",
"canceled_by",
):
SubjectRegistration.objects.filter(**{attr: source}).update(**{attr: target})
for sp in source.leprikon_participants.all():
tp = target.leprikon_participants.filter(birth_num=sp.birth_num).first()
if tp:
tp = merge_objects(sp, tp, exclude=("id", "user", "birth_num"))
tp.save()
else:
sp.user = target
sp.save()
for sp in source.leprikon_parents.all():
tp = target.leprikon_parents.filter(first_name=sp.first_name, last_name=sp.last_name).first()
if tp:
tp = merge_objects(sp, tp, exclude=("id", "user"))
tp.save()
else:
sp.user = target
sp.save()
for sbi in source.leprikon_billing_info.all():
tbi = target.leprikon_billing_info.filter(name=sbi.name).first()
if tbi:
tbi = merge_objects(sbi, tbi, exclude=("id", "user"))
tbi.save()
else:
sbi.user = target
sbi.save()
for mr in source.leprikon_messages.all():
if not target.leprikon_messages.filter(message=mr.message).exists():
mr.recipient = target
mr.save()
try:
# support social auth
source.social_auth.update(user=target)
except AttributeError:
pass
from .rocketchat import RocketChat
RocketChat().merge_users(source, target)
source.delete()
def spayd(*items):
s = "SPD*1.0*" + "*".join(
"%s:%s" % (k, unicodedata.normalize("NFKD", str(v).replace("*", "")).encode("ascii", "ignore").upper().decode())
for k, v in sorted(items)
)
s += "*CRC32:%x" % zlib.crc32(s.encode())
return s.upper()
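# spayd() builds a Short Payment Descriptor string, e.g. (hypothetical values)
# spayd(("ACC", "CZ0001"), ("AM", "100")) -> "SPD*1.0*ACC:CZ0001*AM:100*CRC32:..."
# with the CRC32 of the preceding string appended in hex.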
def paragraph(text):
return mark_safe(f"<p>{text.strip()}</p>".replace("\n", "<br/>\n").replace("<br/>\n<br/>\n", "</p>\n\n<p>"))
lazy_paragraph = lazy(paragraph, str)
| 2.203125 | 2 |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/NV/texture_expand_normal.py | JE-Chen/je_old_repo | 0 | 12792194 | '''OpenGL extension NV.texture_expand_normal
This module customises the behaviour of the
OpenGL.raw.GL.NV.texture_expand_normal to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a remapping mode where unsigned texture
components (in the range [0,1]) can be treated as though they
contained signed data (in the range [-1,+1]). This allows
applications to easily encode signed data into unsigned texture
formats.
The functionality of this extension is nearly identical to the
EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners
extension, although it applies even if register combiners are used.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.texture_expand_normal import *
from OpenGL.raw.GL.NV.texture_expand_normal import _EXTENSION_NAME
def glInitTextureExpandNormalNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | 2.09375 | 2 |
runner/runner.py | alanbchristie/PyKubePressureCooker | 4 | 12792195 | #!/usr/bin/env python3
"""An abstract _Runner_ module.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from enum import Enum, auto, unique
import threading
import uuid
RUNNER_IMAGE = 'alanbchristie/pydatalister'
RUNNER_TAG = 'latest'
@unique
class RunnerState(Enum):
"""Runner execution states.
"""
# The first event is always BEGIN.
# The last and final event is always END.
#
# The normal event sequence, which relates to a runner
# that's successfully created, runs, completes and is then
# automatically deleted is represented by the following sequence:
#
# BEGIN - PREPARING - RUNNING - COMPLETE - END
BEGIN = auto() # The Runner initial state (assigned in begin())
PREPARING = auto() # The Runner is preparing to run
RUNNING = auto() # The Runner is Running
COMPLETE = auto() # The Runner has completed its actions (naturally)
STOPPING = auto() # The Runner is stopping - in response to a stop()
STOPPED = auto() # The runner has stopped - in response to a stop()
FAILED = auto() # There has been a problem
END = auto() # The last event, issued when the runner's gone
RunnerStateTuple = namedtuple('Runner', ['state', 'context', 'msg'])
class Runner(threading.Thread, metaclass=ABCMeta):
"""The ``Runner`` base class, from which all Runners are derived.
"""
def __init__(self, callback, callback_context):
"""The basic Runner initialser.
"""
threading.Thread.__init__(self)
assert callable(callback)
self._state_callback = callback
self._callback_context = callback_context
self._runner_state = None
self._stopping = False
self._runner_uuid = uuid.uuid4()
# A synchronisation lock
self.lock = threading.Lock()
print('New Runner() {%s}' % self._runner_uuid)
def _set_runner_state(self, runner_state, msg=None):
"""Sets the runner state and informs the user.
:param runner_state: The new Runner state
        :type runner_state: ``RunnerState``
"""
assert isinstance(runner_state, RunnerState)
self._runner_state = runner_state
print('New RunnerState (%s) {%s}' % (runner_state, self._runner_uuid))
# Inform the user of each state change.
# The receiver must expect a `RunnerStateTuple` as the first argument
# in the callback method.
assert self._state_callback
rso = RunnerStateTuple(runner_state, self._callback_context, msg)
self._state_callback(rso, self._callback_context)
@abstractmethod
def begin(self):
"""Starts the Runner. The state_callback will be supplied with
instances of the RunnerState as the runner progresses. This
method must only be called once.
This method must not block.
"""
assert self._runner_state is None
self._set_runner_state(RunnerState.BEGIN)
def end(self):
"""Stops the Runner. This method should be called only of a Runner is
to be prematurely stopped. Runners have a built-in lifetime and are
normally left to complete naturally.
If the Runner is still running this method introduces the
``RunnerState`` values of ``STOPPING`` and ``STOPPED``, normally not
seen.
This method does nothing if the Runner is already stopping or has
completed.
This method must not block.
"""
print('End called... {%s}' % self._runner_uuid)
if self._stopping:
print('Ignoring (already in progress). {%s}' %
self._runner_uuid)
return
elif self._runner_state in [RunnerState.COMPLETE,
RunnerState.END]:
print('Ignoring (Runner already gone). {%s}' %
self._runner_uuid)
return
self._set_runner_state(RunnerState.STOPPING)
# Just set the 'stopping' field (and change the state).
# This should cause the main thread to exit - it's
# the responsibility of the implementing class.
self._stopping = True
print('End is nigh! {%s}' % self._runner_uuid)
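# Illustrative subclass sketch (commented out; the names below are assumptions, not
# part of this module). A concrete Runner overrides begin(), calls the base
# implementation to emit BEGIN, and then drives the remaining states itself:
#
# class NoOpRunner(Runner):
#     def begin(self):
#         super().begin()                               # emits RunnerState.BEGIN
#         self._set_runner_state(RunnerState.COMPLETE)  # pretend the work finished
#         self._set_runner_state(RunnerState.END)       # final event
#
# def on_state(state_tuple, context):
#     print(state_tuple.state, context)
#
# NoOpRunner(on_state, 'demo-context').begin()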
| 2.75 | 3 |
Betsy/Betsy/modules/add_md_tags_to_bam_folder.py | jefftc/changlab | 9 | 12792196
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
out_path):
import os
from genomicode import config
from genomicode import filelib
from genomicode import parallel
from genomicode import alignlib
from Betsy import module_utils
## Importing pysam is hard!
#import sys
#sys_path_old = sys.path[:]
#sys.path = [x for x in sys.path if x.find("RSeQC") < 0]
#import pysam
#sys.path = sys_path_old
bam_node, ref_node = antecedents
bam_filenames = module_utils.find_bam_files(bam_node.identifier)
assert bam_filenames, "No .bam files."
ref = alignlib.create_reference_genome(ref_node.identifier)
filelib.safe_mkdir(out_path)
# list of (in_filename, err_filename, out_filename)
jobs = []
for in_filename in bam_filenames:
p, f = os.path.split(in_filename)
s, ext = os.path.splitext(f)
log_filename = os.path.join(out_path, "%s.log" % s)
out_filename = os.path.join(out_path, f)
assert in_filename != out_filename
x = in_filename, log_filename, out_filename
jobs.append(x)
# Don't do this. Need MD, NM, NH in
# summarize_alignment_cigar. To be sure, just redo it.
## If the files already have MD tags, then just symlink the
## files. Don't add again.
#i = 0
#while i < len(jobs):
# in_filename, out_filename = jobs[i]
#
# handle = pysam.AlignmentFile(in_filename, "rb")
# align = handle.next()
# tag_dict = dict(align.tags)
# if "MD" not in tag_dict:
# i += 1
# continue
# # Has MD tags. Just symlink and continue.
# os.symlink(in_filename, out_filename)
# del jobs[i]
# Make a list of samtools commands.
# Takes ~200 Mb per process, so should not be a big issue.
samtools = filelib.which_assert(config.samtools)
sq = parallel.quote
commands = []
for x in jobs:
in_filename, log_filename, out_filename = x
# samtools calmd -b <in.bam> <ref.fasta> > <out.bam>
# May generate error:
# [bam_fillmd1] different NM for read
# 'ST-J00106:118:H75L3BBXX:3:2128:21846:47014': 0 -> 19
# Pipe stderr to different file.
x = [
samtools,
"calmd", "-b",
sq(in_filename),
sq(ref.fasta_file_full),
]
x = " ".join(x)
x = "%s 2> %s 1> %s" % (x, sq(log_filename), sq(out_filename))
commands.append(x)
parallel.pshell(commands, max_procs=num_cores)
# Make sure the analysis completed successfully.
x = [x[-1] for x in jobs]
filelib.assert_exists_nz_many(x)
def name_outfile(self, antecedents, user_options):
return "md.bam"
| 2.0625 | 2 |
examples/models.py | sayanjap/DynamicForms | 42 | 12792197
from datetime import timedelta
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from django.db import models
from django.utils import timezone
class Validated(models.Model):
"""
Shows validation capabilities
"""
code = models.CharField(max_length=10, validators=[
RegexValidator(r'\w\w\d+', 'Please enter a string starting with two characters, followed by up to 8 numbers')
])
enabled = models.BooleanField()
amount = models.IntegerField(null=True, blank=True, validators=[
# This one should be interesting: will a blank value pass the Min validator? It should!
MinValueValidator(5),
MaxValueValidator(10)
]) # Bit mask. 1=apartment_number, ..., 32=delay
item_type = models.IntegerField(choices=(
(0, 'Choice 1'),
(1, 'Choice 2'),
(2, 'Choice 3'),
(3, 'Choice 4'),
))
item_flags = models.CharField(max_length=4, blank=True, choices=(
# this one will be a multi-choice field so you will need to override it in form
('A', 'A'),
('B', 'B'),
('C', 'C'),
('D', 'D'),
), validators=[
RegexValidator(r'^[ABC]*$', 'Only options A-C may be chosen', 'regex')
])
comment = models.TextField(null=True, blank=True)
class HiddenFields(models.Model):
"""
Shows dynamically changing field visibility
"""
note = models.CharField(max_length=20, help_text='Enter abc to hide unit field')
unit = models.CharField(max_length=10, choices=(
(None, 'No additional data'),
('pcs', 'Pieces'),
('wt', 'Weight'),
('cst', 'Custom'),
), null=True, blank=True)
int_fld = models.IntegerField(verbose_name='Quantity', null=True, blank=True)
qty_fld = models.FloatField(verbose_name='Weight', null=True, blank=True,
                                help_text='Feel free to use a decimal point / comma')
cst_fld = models.CharField(max_length=80, verbose_name='Comment', null=True, blank=True,
help_text='Enter additional info here')
additional_text = models.CharField(max_length=80, null=True, blank=True,
help_text='Now that you have shown me, please enter something')
class PageLoad(models.Model):
"""
Shows how DynamicForms handles dynamic loading of many records in ViewSet result
"""
description = models.CharField(max_length=20, help_text='Item description')
choice = models.IntegerField(choices=((1, 'Choice 1'),
(2, 'Choice 2'),
(3, 'Choice 3')), null=False, blank=False, default=1)
class Filter(models.Model):
"""
    Shows how DynamicForms handles filters
"""
char_field = models.CharField(max_length=20, help_text='Char field', verbose_name='Char field')
datetime_field = models.DateTimeField(help_text='Datetime field', verbose_name='Datetime field')
int_field = models.IntegerField(help_text='Integer field', verbose_name='Integer field')
int_choice_field = models.IntegerField(choices=(
(0, 'Choice 1'),
(1, 'Choice 2'),
(2, 'Choice 3'),
(3, 'Choice 4'),), help_text='Integer field with choices', verbose_name='Integer field with choices')
bool_field = models.BooleanField(help_text='Boolean field', verbose_name='Boolean field')
name = models.CharField(max_length=20, help_text='Name field', verbose_name='Name field', null=True, blank=True)
class BasicFields(models.Model):
"""
Shows basic available fields in DynamicForms
"""
boolean_field = models.BooleanField(null=False, default=False)
nullboolean_field = models.BooleanField(null=True)
char_field = models.CharField(null=True, max_length=32)
email_field = models.EmailField(null=True)
slug_field = models.SlugField(null=True)
url_field = models.URLField(null=True)
uuid_field = models.UUIDField(null=True)
ipaddress_field = models.GenericIPAddressField(null=True)
integer_field = models.IntegerField(null=True)
nullint_field = models.IntegerField(null=True, blank=True)
float_field = models.FloatField(null=True)
decimal_field = models.DecimalField(null=True, max_digits=5, decimal_places=2)
datetime_field = models.DateTimeField(null=True)
date_field = models.DateField(null=True)
time_field = models.TimeField(null=True)
duration_field = models.DurationField(null=True)
password_field = models.CharField(null=True, max_length=32)
class Relation(models.Model):
"""
Model related to AdvancedFields model
"""
name = models.CharField(max_length=16)
def __str__(self):
return self.name
class AdvancedFields(models.Model):
"""
Shows advanced available fields in DynamicForms
"""
regex_field = models.CharField(max_length=256)
choice_field = models.CharField(null=True, max_length=8)
single_choice_field = models.CharField(null=True, max_length=8)
multiplechoice_field = models.CharField(null=True, max_length=8)
filepath_field = models.FilePathField(null=True)
file_field = models.FileField(upload_to='examples/', null=True, blank=True)
file_field_two = models.FileField(upload_to='examples2/', null=True, blank=True)
image_field = models.ImageField(upload_to='examples/', null=True, blank=True)
# Model attribute for ReadOnlyField
hidden_field = models.DateTimeField(default=timezone.now)
@property
def readonly_field(self):
return self.hidden_field > timezone.now() - timedelta(days=1)
"""ListField and DictField not supported in HTML forms in DRF"""
# list_field = models.?
# dict_field = models.?
"""JSONField available only for PostgreSQL"""
# json_field = models.JSONField()
# serializer_method_field = models.?
# model_field = models.?
# Relations
# string_related_field, which is always read_only is defined only in serializer
# and primary_key_related_field is defined as its source
primary_key_related_field = models.OneToOneField(
Relation,
on_delete=models.CASCADE,
null=True,
related_name='primary'
)
slug_related_field = models.ForeignKey(
Relation,
on_delete=models.CASCADE,
null=True,
related_name='slug'
)
hyperlinked_related_field = models.ManyToManyField(
Relation,
related_name='hyper_related'
)
hyperlinked_identity_field = models.ForeignKey(
Relation,
on_delete=models.CASCADE,
null=True,
related_name='hyper_identity'
)
def __str__(self):
return 'Advanced field {self.id}'.format(**locals())
class RefreshType(models.Model):
"""
Shows how DynamicForms handles different refresh types
"""
description = models.CharField(max_length=20, help_text='Item description')
rich_text_field = models.TextField(blank=True, null=True)
| 2.515625 | 3 |
strings/jaden_case.py | ethyl2/code_challenges | 0 | 12792198
"""
https://www.codewars.com/kata/5390bac347d09b7da40006f6
Given a string, return a string in which each word is capitalized
Example:
Not Jaden-Cased: "How can mirrors be real if our eyes aren't real"
Jaden-Cased: "How Can Mirrors Be Real If Our Eyes Aren't Real"
"""
import string
def to_jaden_case(string: str) -> str:
"""
My implementation
"""
return ' '.join([word.capitalize() for word in string.split()])
def to_jaden_case2(string_input: str) -> str:
"""
Another person's version. string.capwords() is handy.
"""
return string.capwords(string_input)
print(to_jaden_case("How can mirrors be real if our eyes aren't real"))
print(to_jaden_case2("How can mirrors be real if our eyes aren't real"))
| 4.15625 | 4 |
tests/test_chinormfilter.py | po3rin/chinormfilter | 2 | 12792199 | from chinormfilter import __version__
from chinormfilter.cli import Filter
def test_version():
assert __version__ == '0.5.0'
def test_kuro2sudachi_cli(capsys):
f = Filter(dict_type="full")
assert f.duplicated("林檎,りんご") is True
assert f.duplicated("レナリドミド, レナリドマイド") is False
assert f.duplicated("エダマメ,枝豆") is True
assert f.duplicated("えだまめ,枝豆") is True
assert f.duplicated("飲む,呑む") is True
assert f.duplicated("エダマメ => 枝豆") is True
assert f.duplicated("tlc => tlc,全肺気量") is False
assert f.duplicated("リンたんぱく質,リン蛋白質,リンタンパク質") is True
assert f.duplicated("グルタチオン => グルタチオン,タチオン,ランデールチオン") is False
| 2.28125 | 2 |
test/test_getter.py | taleinat/funcy-chain | 2 | 12792200
from funcy_chain import getter
def test_long_path(Chain):
data = [{"a": {"b": {"c": [1, 2, {"d": [3, {1: 2}]}]}}}]
assert Chain(data).map(getter(["a", "b", "c", 2, "d", 1, 1])).value == [2]
def test_names(Chain):
data = {
"user1": {
"firstname": "Alice",
"lastname": "Liddle",
},
"user2": {
"firstname": "Bob",
"lastname": "Kennedy",
},
}
names = (
Chain(data).items().map(getter([1, "lastname"], [1, "firstname"])).sort().map(", ".join)
).value
assert names == ["<NAME>", "<NAME>"]
| 2.609375 | 3 |
ValidacionEmail/validacionemail.py | BrianMarquez3/Python-Course | 20 | 12792201 | # Email address validation in Python
contador = 0
email = input("Ingrese su correo electronico: ")
for i in email:
if ("@" or "."):
contador = contador+1
if contador == 2:
print("Correo Valido")
else:
print("Correo Invalido")
| 3.921875 | 4 |
tests/demo_migrations/migrations.py | achaussier/copier | 0 | 12792202 | #!/usr/bin/env python3
import json
import os
import sys
NAME = "{VERSION_FROM}-{VERSION_CURRENT}-{VERSION_TO}-{STAGE}.json"
with open(NAME.format(**os.environ), "w") as fd:
json.dump(sys.argv, fd)
| 2 | 2 |
Exp_train.py | KampfWut/MORE | 2 | 12792203 | # Author: <NAME>
# Date: 2020-01-10
# Function: Run training
#-------------------------- import package --------------------------#
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import winsound
from Code_utils import *
from Code_models import GCN, MLP, MORE
#--------------------------- main process ---------------------------#
# Set random seed
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'MORE', 'Model string.') # 'gcn', 'gcn_cheby', 'dense', 'MORE'
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 500, 'Number of epochs to train.')
flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 30, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
flags.DEFINE_integer('motif_feature_dim', 6, 'the dim of motif features')
flags.DEFINE_list('property_embedding_hidden', [256], 'the hidden layer number of property embedding')
flags.DEFINE_list('motif_embedding_hidden', [256], 'the hidden layer number of motif embedding')
flags.DEFINE_list('integration_hidden', [], 'the hidden layer number of integration')
flags.DEFINE_string('embeding_combination_method', "Hadamard", 'the method of embedding combination')
# embeding_combination_method ---- "Hadamard", "Summation", "Connection"
# batch_run
use_batch = False
if use_batch:
    FLAGS.model = 'MORE'
lr = [0.01, 0.001, 0.0003, 0.003]
le = [300, 500, 1000, 2000]
mo = ["Hadamard", "Summation", "Connection"]
la = [32, 64, 128, 256, 512]
mode_list = []
for i in range(0, 4):
temp1 = [lr[i], le[i]]
for j in range(0, 3):
temp2 = temp1 + [mo[j]]
for k in range(0, 5):
temp3 = temp2 + [la[k]]
mode_list.append(temp3)
mode = mode_list[59] # 0-14, 15-29, 30-44, 45-59
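    # mode_list enumerates 4 (lr, epochs) pairs x 3 combination methods x 5 layer sizes
    # = 60 configs indexed 0-59; mode_list[59] is the last one (lr=0.003, 2000 epochs,
    # "Connection", 512-unit embeddings).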
print(mode)
FLAGS.learning_rate = mode[0]
FLAGS.epochs = mode[1]
FLAGS.embeding_combination_method = mode[2]
FLAGS.motif_embedding_hidden = [mode[3]]
FLAGS.property_embedding_hidden = [mode[3]]
# Load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, motiffeatures = load_data(FLAGS.dataset)
# Some preprocessing
features = preprocess_features(features)
motiffeatures = preprocess_features(motiffeatures)
if FLAGS.model == 'gcn':
support = [preprocess_adj(adj)]
num_supports = 1
model_func = GCN
elif FLAGS.model == 'gcn_cheby':
support = chebyshev_polynomials(adj, FLAGS.max_degree)
num_supports = 1 + FLAGS.max_degree
model_func = GCN
elif FLAGS.model == 'dense':
support = [preprocess_adj(adj)] # Not used
num_supports = 1
model_func = MLP
elif FLAGS.model == 'MORE':
support = [preprocess_adj(adj)] # Not used
num_supports = 1
model_func = MORE
else:
raise ValueError('Invalid argument for model: ' + str(FLAGS.model))
# Define placeholders
placeholders = {
'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
'motiffeatures': tf.sparse_placeholder(tf.float32, shape=tf.constant(motiffeatures[2], dtype=tf.int64)),
'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
'labels_mask': tf.placeholder(tf.int32),
'dropout': tf.placeholder_with_default(0., shape=()),
'num_features_nonzero': tf.placeholder(tf.int32), # helper variable for sparse dropout
'num_motif_features_nonzero': tf.placeholder(tf.int32)
}
# Create model
model = model_func(placeholders, input_dim=features[2][1], logging=True)
# Initialize session
sess = tf.Session()
# Define model evaluation function
def evaluate(features, support, labels, mask, motiffeatures, placeholders):
t_test = time.time()
feed_dict_val = construct_feed_dict(features, support, labels, mask, motiffeatures, placeholders)
outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], (time.time() - t_test)
# Init variables
sess.run(tf.global_variables_initializer())
cost_val = []
train_acc, val_acc, Tacc = [], [], []
train_loss, val_loss, Tloss = [], [], []
# Train model
train_starttime = time.time()
train_time_list = []
stop_epoch = 0
for epoch in range(FLAGS.epochs):
t = time.time()
stop_epoch = epoch + 1
# Construct feed dictionary
feed_dict = construct_feed_dict(features, support, y_train, train_mask, motiffeatures, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Training step
outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)
train_acc.append(outs[2])
train_loss.append(outs[1])
# Validation
cost, acc, duration = evaluate(features, support, y_val, val_mask, motiffeatures, placeholders)
cost_val.append(cost)
val_acc.append(acc)
val_loss.append(cost)
# Print results
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
"train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
"val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))
train_time_list.append(time.time() - t)
test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders)
Tacc.append(test_acc)
Tloss.append(test_cost)
if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
print("Early stopping...")
break
print("Optimization Finished!")
train_time = time.time() - train_starttime
# Tacc = Tacc[-max(FLAGS.early_stopping * 2, 20):]
# Testing
test_starttime = time.time()
test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, motiffeatures, placeholders)
print("Test set results:", "cost=", "{:.5f}".format(test_cost),
"accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))
print("Max test acc = {:.5f}".format(max(Tacc)))
test_time = time.time() - test_starttime
# Save
with open("Result\\Train_log.csv", mode='a') as f:
f.write("{},{},{},{},{},{},{},{},{},{:.4f},{:.4f},(best={:.4f}),{:.4f},{},{:.6f},{:.6f},{:.6f}\n".\
format(seed,FLAGS.dataset,FLAGS.model,FLAGS.learning_rate, FLAGS.dropout, FLAGS.embeding_combination_method,\
str(FLAGS.property_embedding_hidden), str(FLAGS.motif_embedding_hidden), str(FLAGS.integration_hidden),\
test_acc,test_cost, max(Tacc),test_duration,stop_epoch, train_time, np.mean(train_time_list), test_time))
with open("Result\\Loss.csv", mode='a') as f:
for i in train_loss:
f.write("{:.6f},".format(i))
f.write("\n")
for i in val_loss:
f.write("{:.6f},".format(i))
f.write("\n")
for i in Tloss:
f.write("{:.6f},".format(i))
f.write("\n")
with open("Result\\Acc.csv", mode='a') as f:
for i in train_acc:
f.write("{:.6f},".format(i))
f.write("\n")
for i in val_acc:
f.write("{:.6f},".format(i))
f.write("\n")
for i in Tacc:
f.write("{:.6f},".format(i))
f.write("\n")
# Sound
duration = 500 # millisecond
freq = 600 # Hz
winsound.Beep(freq, duration) | 2.09375 | 2 |
cluster/multi_gpu_test.py | diegoaldarondo/dannce | 0 | 12792204
"""Tests for locomotion.tasks.two_tap."""
import cluster.multi_gpu as multi_gpu
import functools
from absl.testing import absltest
import numpy as np
import os
DEMO_PATH = "../demo/markerless_mouse1"
CONFIG_PATH = "../tests/configs/dannce_mouse_config.yaml"
DANNCE_PATH = "../tests/configs/label3d_dannce.mat"
class MultiGpuTest(absltest.TestCase):
def test_dannce_predict_help_message(self):
os.system("dannce-predict-multi-gpu --help")
def test_com_predict_help_message(self):
os.system("com-predict-multi-gpu --help")
def test_dannce_predict_batch_params(self):
handler = multi_gpu.MultiGpuHandler(
CONFIG_PATH,
n_samples_per_gpu=100,
verbose=False,
test=True,
dannce_file=DANNCE_PATH,
)
batch_params, _ = handler.submit_dannce_predict_multi_gpu()
self.assertTrue(os.path.exists(handler.batch_param_file))
self.assertTrue(len(batch_params) == 10)
def test_com_predict_batch_params(self):
handler = multi_gpu.MultiGpuHandler(
CONFIG_PATH,
n_samples_per_gpu=100,
verbose=False,
test=True,
dannce_file=DANNCE_PATH,
)
batch_params, _ = handler.submit_com_predict_multi_gpu()
self.assertTrue(os.path.exists(handler.batch_param_file))
self.assertTrue(len(batch_params) == 180)
def test_raises_error_if_no_dannce_file(self):
with self.assertRaises(FileNotFoundError):
handler = multi_gpu.MultiGpuHandler(
CONFIG_PATH, n_samples_per_gpu=100, verbose=False, test=True
)
def test_dannce_predict_multi_gpu_cli(self):
cmd = "dannce-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" % (
CONFIG_PATH,
DANNCE_PATH,
)
os.system(cmd)
def test_com_predict_multi_gpu_cli(self):
cmd = "com-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" % (
CONFIG_PATH,
DANNCE_PATH,
)
os.system(cmd)
if __name__ == "__main__":
absltest.main()
| 2.109375 | 2 |
twick/response.py | jsvine/twick | 63 | 12792205 | from datetime import datetime
from twick.tweet import Tweet
import twick.settings as settings
class Response(object):
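    """Wraps a raw Twitter search response: the parsed tweets, the search metadata
    and a timestamp recording when the response object was created."""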
def __init__(self, raw):
self.raw = raw
self.tweets = list(map(Tweet, raw["statuses"]))
self.metadata = dict(raw["search_metadata"])
self.timestamp = datetime.now()
def to_row(self):
return self.metadata
| 2.890625 | 3 |
authors/apps/articles/migrations/0002_auto_20190515_1247.py | andela/ah-backend-dojo | 3 | 12792206 | # Generated by Django 2.1 on 2019-05-15 12:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('article_tag', '0001_initial'),
('articles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='readingstats',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritearticle',
name='article',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article'),
),
migrations.AddField(
model_name='favoritearticle',
name='favorited_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='article',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username'),
),
migrations.AddField(
model_name='article',
name='tagList',
field=models.ManyToManyField(related_name='articles', to='article_tag.ArticleTag'),
),
migrations.AlterUniqueTogether(
name='readingstats',
unique_together={('article', 'user')},
),
]
| 1.648438 | 2 |
flask_toolkit/shared/payload_wrap.py | Creditas/flask-toolkit | 3 | 12792207
import json
class PayloadWrap:
_payload = dict()
def __init__(self, payload):
if isinstance(payload, str):
self._payload = json.loads(payload)
else:
self._payload = payload
def to_json(self):
return json.dumps(self._payload)
| 2.703125 | 3 |
tests/models.py | traktor88/test | 0 | 12792208 | from django.db import models
from tagging.fields import TagField
class Perch(models.Model):
size = models.IntegerField()
smelly = models.BooleanField(default=True)
class Parrot(models.Model):
state = models.CharField(maxlength=50)
perch = models.ForeignKey(Perch, null=True)
def __str__(self):
return self.state
class Meta:
ordering = ['state']
class Link(models.Model):
name = models.CharField(maxlength=50)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class Article(models.Model):
name = models.CharField(maxlength=50)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class FormTest(models.Model):
tags = TagField()
| 2.140625 | 2 |
scripts/validate_graph.py | dongguosheng/deepwalk | 22 | 12792209 | # -*- coding: gbk -*-
import sys
def validate(filename):
n_max = -1
v_set = set()
with open(filename) as fin:
n_line = 0
for line in fin:
n_line += 1
v_list = line.rstrip().split()
if 'adj' in filename:
if len(v_list) < 2:
print '(invalid) at line %d' % n_line
return
if 'edge' in filename:
if len(v_list) != 2 and len(v_list) != 3:
print '(invalid) at line %d' % n_line
return
if len(v_list) == 3:
v_list = v_list[: 2]
for vid in v_list:
vid = int(vid)
if vid < 0:
print '(invalid) at line %d' % n_line
return
v_set.add(vid)
if vid > n_max:
n_max = vid
if len(v_set) == n_max and 0 not in v_set:
print '(valid) start index: 1, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set))
elif len(v_set) == n_max + 1 and 0 in v_set:
print '(valid) start index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set))
else:
print '(valid) start index: 0, max vertex idx: %d, total %d vertex.' % (n_max, len(v_set))
        print 'vertex ids are not continuous, lost %d id.' % (n_max - len(v_set) + 1 if 0 in v_set else n_max - len(v_set))
def main():
if len(sys.argv) != 2:
print 'validate_graph.py graph_file'
else:
validate(sys.argv[1])
if __name__ == '__main__':
main()
| 3.1875 | 3 |
scripts/slave/recipes/simple_ci.py | bopopescu/chromium-build | 0 | 12792210
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.recipe_api import Property
DEPS = [
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
PROPERTIES = {
# New Gerrit patch properties.
'patch_storage': Property(kind=str, default=None),
'patch_gerrit_url': Property(kind=str, default=None),
'patch_repository_url': Property(kind=str, default=None),
'patch_ref': Property(kind=str, default=None),
# Non-patch jobs properties.
'repository': Property(kind=str, help='Full url to a Git repository',
default=None, param_name='repo_url'),
'refspec': Property(kind=str, help='Refspec to checkout', default='master'),
}
def RunSteps(api, repo_url, refspec, patch_storage, patch_repository_url,
patch_ref):
if patch_storage:
assert patch_storage == 'gerrit'
assert patch_repository_url and patch_ref
repo_url = patch_repository_url
refspec = patch_ref
assert repo_url and refspec, 'repository url and refspec must be given'
assert repo_url.startswith('https://')
api.step('git init', ['git', 'init'])
api.step('git reset', ['git', 'reset', '--hard'])
api.step('git fetch', ['git', 'fetch', repo_url, '%s' % refspec])
api.step('git checkout', ['git', 'checkout', 'FETCH_HEAD'])
api.step('git submodule update', ['git', 'submodule', 'update',
'--init', '--recursive'])
result = api.python.inline(
'read tests',
# Multiplatform "cat"
"with open('infra/config/ci.cfg') as f: print f.read()",
stdout=api.raw_io.output_text(),
step_test_data=(lambda:
api.raw_io.test_api.stream_output(
'./a.sh\npython b.py\npython c.py args')))
tests = []
for l in result.stdout.splitlines():
l = l.strip()
if l and not l.startswith('#'):
tests.append(l)
with api.step.defer_results():
for l in sorted(tests):
name = 'test: %s' % l
cmd = l.split()
if cmd[0] == 'python' and len(cmd) >= 2:
api.python(name, script=cmd[1], args=cmd[2:])
else:
api.step(name, cmd)
def GenTests(api):
yield api.test('ci') + api.properties(
repository='https://chromium.googlesource.com/infra/infra',
)
yield api.test('cq_try') + api.properties.tryserver(
gerrit_project='infra/infra',
)
yield api.test('ci_fail_but_run_all') + api.properties(
repository='https://chromium.googlesource.com/infra/infra',
refspec='release-52'
) + api.override_step_data('test: ./a.sh', retcode=1)
| 1.757813 | 2 |
io_sedASCII/import_amf.py | naomiEve/io_sedASCII | 1 | 12792211 | import bpy
from bpy_extras.mesh_utils import ngon_tessellate
from . import se3
def get_se3_mesh_form_file(filepath):
file_query = se3.ASCIIFileQuery(filepath)
version = file_query.get_num_value("SE_MESH")
mesh = se3.Mesh(version)
num_of_layers = file_query.get_num_value("LAYERS")
file_query.follows_block_begin_decl() #open layers block
processed_layers = 0
while processed_layers < num_of_layers:
layer_name = file_query.get_str_value("LAYER_NAME")
layer_index = file_query.get_long_value("LAYER_INDEX")
file_query.follows_block_begin_decl() #open layer block
layer = se3.Layer(layer_name, layer_index)
mesh.layers.append(layer)
num_of_maps = file_query.get_long_value("VERTEX_MAPS")
file_query.follows_block_begin_decl() #open vertex maps block
processed_maps = 0
num_of_texcoord_maps = 0
num_of_weight_maps = 0
num_of_morph_maps = 0
while processed_maps < num_of_maps:
map_type = file_query.get_map_type()
map_name = file_query.get_map_name()
file_query.follows_block_begin_decl() #open vertex map block
            map_is_relative = False  # default; only morph maps read a RELATIVE flag, so initialise it for the other map types
            if map_type == se3.VERTEX_MAP_TYPE_MORPH:
type_index = num_of_morph_maps
get_map_elem = file_query.get_morph_elem
map_is_relative = file_query.get_bool_value("RELATIVE")
num_of_morph_maps += 1
elif map_type == se3.VERTEX_MAP_TYPE_TEXCOORD:
type_index = num_of_texcoord_maps
get_map_elem = file_query.get_texcoord_elem
num_of_texcoord_maps += 1
elif map_type == se3.VERTEX_MAP_TYPE_WEIGHT:
type_index = num_of_weight_maps
get_map_elem = file_query.get_weight_elem
num_of_weight_maps += 1
map = se3.VertexMap(map_type, map_name, map_is_relative)
map.type_index = type_index
num_of_map_elems = file_query.get_long_value("ELEMENTS")
file_query.follows_block_begin_decl() # open elements block
processed_elems = 0
while processed_elems < num_of_map_elems:
file_query.follows_block_begin_decl() #open element block
map.elements.append(get_map_elem())
file_query.follows_block_end_decl() #close element block
processed_elems += 1
file_query.follows_block_end_decl() #close elements block
processed_maps += 1
layer.vertex_maps_append(map)
file_query.follows_block_end_decl() #close vertex map block
file_query.follows_block_end_decl() #close vertex maps block
num_of_verts = file_query.get_long_value("VERTICES")
file_query.follows_block_begin_decl() #open vertices block
num_of_processed_vertices = 0
while num_of_processed_vertices < num_of_verts:
vertex = se3.Vertex()
morph_pointers = vertex.morph_pointers
weight_pointers = vertex.weight_pointers
uv_pointers = vertex.uv_pointers
file_query.follows_block_begin_decl() #open vertex block
num_of_pointers = file_query.get_num_of_values()
num_of_processed_pointers = 0
is_last_pointer = False
last_pointer_index = num_of_pointers - 1
while num_of_processed_pointers < num_of_pointers:
if num_of_processed_pointers == last_pointer_index:
is_last_pointer = True
vertex_data_pointer = file_query.get_vertex_data_pointer(is_last_pointer)
vertex_map_index = vertex_data_pointer[0]
vertex_map_type = layer.vertex_maps[vertex_map_index].type
if vertex_map_type == se3.VERTEX_MAP_TYPE_MORPH:
morph_pointers.append(vertex_data_pointer)
elif vertex_map_type == se3.VERTEX_MAP_TYPE_WEIGHT:
weight_pointers.append(vertex_data_pointer)
elif vertex_map_type == se3.VERTEX_MAP_TYPE_TEXCOORD:
uv_pointers.append(vertex_data_pointer)
num_of_processed_pointers += 1
layer.vertices.append(vertex)
file_query.follows_block_end_decl() #close vertex block
num_of_processed_vertices += 1
file_query.follows_block_end_decl() #close vertices block
num_of_polys = file_query.get_long_value("POLYGONS")
file_query.follows_block_begin_decl() #open polygons block
processed_polys = 0
while processed_polys < num_of_polys:
poly = []
file_query.follows_block_begin_decl() #open polygon block
num_of_values = file_query.get_num_of_values()
processed_values = 0
is_last_value = False
last_value_idx = num_of_values - 1
while processed_values < num_of_values:
if processed_values == last_value_idx:
is_last_value = True
poly.append(file_query.get_vert_idx(is_last_value))
processed_values += 1
file_query.follows_block_end_decl() #close polygon block
layer.polygons.append(tuple(poly))
processed_polys += 1
file_query.follows_block_end_decl() #close polygons block
num_of_poly_maps = file_query.get_long_value("POLYGON_MAPS")
file_query.follows_block_begin_decl() #open polygon maps block
processed_poly_maps = 0
while processed_poly_maps < num_of_poly_maps:
map_type = file_query.get_map_type(False)
map_name = file_query.get_map_name()
map_smoothing_angle = file_query.get_num_value("POLYGON_MAP_SMOOTHING_ANGLE")
polygon_count = file_query.get_long_value("POLYGONS_COUNT")
file_query.follows_block_begin_decl() #open polygon count block
poly_map = se3.PolygonMap(map_type, map_name, map_smoothing_angle)
processed_poly_idxs = 0
while processed_poly_idxs < polygon_count:
poly_map.polygons.append(file_query.get_poly_idx())
processed_poly_idxs += 1
file_query.follows_block_end_decl() #close polygon count block
processed_poly_maps += 1
layer.polygon_maps.append(poly_map)
layer.surface_maps.append(poly_map)
file_query.follows_block_end_decl() #close polygon maps block
file_query.follows_block_end_decl() #close layer block
processed_layers += 1
file_query.follows_block_end_decl() #close layers block
return mesh
def get_bl_face(se3_layer, se3_vertex_indices):
new_indices = []
num_of_texcoord_maps = len(se3_layer.texcoord_maps)
uvs_data = []
for i in range(num_of_texcoord_maps):
uvs_data.append([])
se3_texcoord_maps = se3_layer.texcoord_maps
for index in se3_vertex_indices:
se3_vertex = se3_layer.vertices[index]
new_indices.append(se3_vertex.basic_morph_pointer[1])
for uv_index, uv_pointer in enumerate(se3_vertex.uv_pointers):
elem = se3_texcoord_maps[uv_index].elements[uv_pointer[1]]
uvs_data[uv_index].append(tuple([elem[0], (-elem[1]) + 1]) )
return tuple([tuple(new_indices), uvs_data])
def get_bl_edges(se3_vertex_indices):
edges = []
num_of_indices = len(se3_vertex_indices)
last_index = num_of_indices - 1
for current_index in range(num_of_indices):
next_index = se3_vertex_indices[current_index + 1] if current_index != last_index else se3_vertex_indices[0]
edges.append((se3_vertex_indices[current_index], next_index))
return edges
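# Example (illustrative indices): get_bl_edges((4, 7, 9)) -> [(4, 7), (7, 9), (9, 4)],
# i.e. consecutive vertex pairs plus the closing edge back to the first vertex.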
def get_bl_fgons(vertices, ngon_face):
fgon_faces = []
    tessed_faces = ngon_tessellate(vertices, ngon_face)
for tessed_face in tessed_faces:
fgon_face = []
for tessed_index in tessed_face:
fgon_face.append(ngon_face[tessed_index])
fgon_faces.append(tuple(fgon_face))
return fgon_faces
def edge_not_in(which_edge, edges):
for edge in edges:
edge_rev = (edge[0], edge[1])
if which_edge == edge or which_edge == edge_rev:
return False
return True
def get_bl_face_uv_data(real_face, bl_face):
num_of_uv_tex = len(real_face[1])
uvs_data = []
for i in range(num_of_uv_tex):
uvs_data.append([])
for vert_index in bl_face:
real_index = real_face[0].index(vert_index)
for idx, uv_data in enumerate(uvs_data):
try:
data = real_face[1][idx][real_index]
except:
data = (1,0)
uv_data.append(data)
return uvs_data
def read_file(operator, context):
from mathutils import (Matrix, Vector)
import math
filepath = operator.filepath
se3_mesh = get_se3_mesh_form_file(filepath)
for se3_layer in se3_mesh.layers:
fgon_edge_indices = []
vertices = se3_layer.vertex_maps[0].elements
edges = []
real_faces = []
se3_surface_map_indices = [0] * len(se3_layer.polygons)
material_indices = []
for se3_surface_map_index, se3_surface_map in enumerate(se3_layer.surface_maps):
for polygon_index in se3_surface_map.polygons:
se3_surface_map_indices[polygon_index] = se3_surface_map_index
edge_index_count = 0
for se3_polygon_index, se3_polygon in enumerate(se3_layer.polygons):
se3_num_of_vertex_indices = len(se3_polygon)
se3_is_tri_or_quad = se3_num_of_vertex_indices <= 4
se3_surface_map_index = se3_surface_map_indices[se3_polygon_index]
if se3_is_tri_or_quad:
material_indices.append(se3_surface_map_index)
face = get_bl_face(se3_layer, se3_polygon)
real_faces.append(face)
face_edges = get_bl_edges(face[0])
for face_edge in face_edges:
"""
if edge_not_in(face_edge, edges):
edges.append(face_edge)
edge_index_count += 1
"""
edges.append(face_edge)
edge_index_count += 1
else:
ngon_face = get_bl_face(se3_layer, se3_polygon)
bound_edges = get_bl_edges(ngon_face[0])
fgon_faces = get_bl_fgons(vertices, ngon_face[0])
for fgon_face in fgon_faces:
material_indices.append(se3_surface_map_index)
real_faces.append(tuple( [fgon_face, get_bl_face_uv_data(ngon_face, fgon_face)] ))
face_edges = get_bl_edges(fgon_face)
for face_edge in face_edges:
is_fgon_edge = edge_not_in(face_edge, bound_edges)
edges.append(face_edge)
if is_fgon_edge:
fgon_edge_indices.append(edge_index_count)
edge_index_count += 1
faces = [real_face[0] for real_face in real_faces]
mesh = bpy.data.meshes.new("Test mesh")
mesh.from_pydata(vertices, edges, faces)
for fgon_edge_index in fgon_edge_indices:
mesh.edges[fgon_edge_index].is_fgon = True
for uv_index, se3_texcoord_map in enumerate(se3_layer.texcoord_maps):
uv_tex = mesh.uv_layers.new(se3_texcoord_map.name)
uv_loop = mesh.uv_layers[0]
for face_index, tex_data in enumerate(uv_tex.data):
real_tex_face = real_faces[face_index][1][uv_index]
poly = mesh.polygons[face_index]
for j, k in enumerate(poly.loop_indices):
uv_loop.data[k].uv = real_tex_face[j]
tranf_mat = Matrix(((-1.0, 0.0, 0.0, 0.0),
( 0.0, 0.0, 1.0, 0.0),
( 0.0, 1.0, 0.0, 0.0),
( 0.0, 0.0, 0.0, 1.0)))
obj = bpy.data.objects.new(se3_layer.name, mesh)
context.collection.objects.link(obj)
context.view_layer.objects.active = obj
obj.select_set(True)
se3_non_basic_morph_map = se3_layer.non_basic_morph_maps
se3_vertices = se3_layer.vertices
if se3_non_basic_morph_map:
obj.shape_key_add("position")
shape_keys = []
for se3_other_mmap in se3_non_basic_morph_map:
shape_keys.append(obj.shape_key_add(se3_other_mmap.name))
for se3_vertex in se3_vertices:
other_morph_pnts = se3_vertex.non_basic_morph_pointers
if other_morph_pnts:
for idx, other_mp in enumerate(other_morph_pnts):
type_idx = se3_layer.vertex_maps[other_mp[0]].type_index
se3_disp = se3_layer.vertex_maps[other_mp[0]].elements[other_mp[1]]
se3_vert = se3_vertex.basic_morph_pointer[1]
vert_data = se3_layer.vertex_maps[se3_vertex.basic_morph_pointer[0]].elements[se3_vertex.basic_morph_pointer[1]]
shape_keys[type_idx - 1].data[se3_vert].co = Vector(vert_data) + Vector(se3_disp)
se3_weight_maps = se3_layer.weight_maps
if se3_weight_maps:
vertex_groups = []
for se3_weight_map in se3_weight_maps:
vertex_groups.append(obj.vertex_groups.new(se3_weight_map.name))
for se3_vertex in se3_vertices:
se3_weight_pointers = se3_vertex.weight_pointers
if se3_weight_pointers:
for se3_weight_pointer in se3_weight_pointers:
vertex_index = se3_vertex.basic_morph_pointer[1]
se3_vertex_map_index = se3_weight_pointer[0]
se3_vertex_weight = se3_layer.vertex_maps[se3_vertex_map_index].elements[se3_weight_pointer[1]]
vertex_group_index = se3_layer.vertex_maps[se3_vertex_map_index].type_index
vertex_groups[vertex_group_index].add([vertex_index], se3_vertex_weight, 'REPLACE')
if se3_layer.surface_maps:
materials = []
for se3_surface_map in se3_layer.surface_maps:
material = bpy.data.materials.new(se3_surface_map.name)
materials.append(material)
bpy.ops.object.material_slot_add()
obj.material_slots[-1].material = material
for face in mesh.polygons:
face.material_index = material_indices[face.index]
obj.matrix_world = tranf_mat
bpy.ops.object.transform_apply(rotation=True)
context.view_layer.update()
return {'FINISHED'} | 2.328125 | 2 |
src/plugins/basic/__init__.py | inuEbisu/inuBot | 0 | 12792212 | <filename>src/plugins/basic/__init__.py
from nonebot import on_command
from nonebot.rule import to_me
from nonebot.typing import T_State
from nonebot.adapters import Bot, Event
from . import req, lang
matcher_plugins = on_command('plugins', aliases={'help', '插件', '帮助'}, priority=1)
@matcher_plugins.handle()
async def main_plugins(bot: Bot, event: Event, state: T_State):
msg = req.handle_plugins()
await matcher_plugins.finish(msg)
matcher_basic = on_command('basic', priority=4)
@matcher_basic.handle()
async def main_basic(bot: Bot, event: Event, state: T_State):
await matcher_basic.finish(lang.help_guide) | 1.992188 | 2 |
routes.py | Yusef28/flattery | 0 | 12792213 | <filename>routes.py<gh_stars>0
#!interpreter [optional-arg]
# -*- coding: utf-8 -*-
#
"""
routes.py: All Routes
"""
#Built-in/Generic
import datetime
#Libs
from flask import Flask, g, redirect, render_template, request, url_for, session, flash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import (
Table, Column, Integer, String, MetaData, ForeignKey, Boolean
)
from sqlalchemy import or_
#Modules
from flask_app import db, app
from models import User, Msg
import msg_routes, user_routes
from functools import wraps
#for some reason I need to also import all from each of these.
#especially list_routes for find all from list
from msg_routes import *
from user_routes import *
#needs to be above all functions that use it???
def admin_login_required(f):
@wraps(f)
def decorated_function():
if g.user is None or "user_id" not in session:
return redirect(url_for("dashboard_forbidden"))
elif user_read(session['user_id']).type != "admin":
return redirect(url_for("dashboard_forbidden"))
return f()
return decorated_function
@app.route("/get_msgs_and_current_msg")
def get_msgs_and_current_msg():
user = user_read(session['user_id'])
msgs = Msg.query.all()
#current_msg = find_current_msg(msgs)
return msgs, None#, current_msg
@app.route("/")
def index():
num_users = db.session.query(User).count()
num_msgs = db.session.query(Msg).count()
if 'user_id' not in session or not user_read(session['user_id']):
return render_template('auth/index.html', num_users=num_users, num_msgs=num_msgs)
else:
msgs, current_msg = get_msgs_and_current_msg()
return render_template('auth/index.html', msgs=msgs, current_msg=current_msg,
num_users=num_users, num_msgs=num_msgs)
#Messages @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@app.route("/dashboard_show_msgs")
@admin_login_required
def dashboard_show_msgs():
#get all msgs for the user
msgs, current_msg = get_msgs_and_current_msg()
return render_template('list/dashboard_msgs.html', msgs=msgs)
@app.route("/dashboard_show_live_msgs")
@admin_login_required
def dashboard_show_live_msgs():
#get all msgs for the user
msgs = db.session.query(Msg).filter(Msg.type=="live")
return render_template('list/dashboard_msgs.html', msgs=msgs)
@app.route("/dashboard_show_waiting_msgs")
@admin_login_required
def dashboard_show_waiting_msgs():
#get all msgs for the user
msgs = db.session.query(Msg).filter(Msg.type=="waiting")
return render_template('list/dashboard_msgs.html', msgs=msgs)
@app.route("/dashboard_show_reported_msgs")
@admin_login_required
def dashboard_show_reported_msgs():
#get all msgs for the user
msgs = db.session.query(Msg).filter(Msg.type=="reported")
return render_template('list/dashboard_msgs.html', msgs=msgs)
#Users $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
@app.route("/dashboard_show_all_users")
@admin_login_required
def dashboard_show_all_users():
users = User.query.all()
title = "All Users"
return render_template('list/dashboard_users.html', users=users, title=title)
@app.route("/dashboard_show_online_users")
@admin_login_required
def dashboard_show_online_users():
users = db.session.query(User).filter(User.online==True)
title = "Online Users"
return render_template('list/dashboard_users.html', users=users, title=title)
@app.route("/dashboard_show_admin_users")
@admin_login_required
def dashboard_show_admin_users():
users = db.session.query(User).filter(User.type=="admin")
title = "Admin Users"
return render_template('list/dashboard_users.html', users=users, title=title)
@app.route("/dashboard_show_premium_users")
@admin_login_required
def dashboard_show_premium_users():
users = db.session.query(User).filter(User.type=="premium")
title = "Premium Users"
return render_template('list/dashboard_users.html', users=users, title=title)
@app.route("/dashboard_show_reported_users")
@admin_login_required
def dashboard_show_reported_users():
users = db.session.query(User).filter(User.type=="reported")
title = "Reported Users"
return render_template('list/dashboard_users.html', users=users, title=title)
@app.route("/dashboard_show_banned_users")
@admin_login_required
def dashboard_show_banned_users():
users = db.session.query(User).filter(User.type=="banned")
title = "Banned Users"
return render_template('list/dashboard_users.html', users=users, title=title)
# Main ########################################################################################
@app.route("/dashboard_forbidden")
def dashboard_forbidden():
return render_template('list/forbidden.html')
@app.route("/dashboard_error_404")
def dashboard_error_404():
return render_template('list/error_404.html')
@app.route("/dashboard")
@admin_login_required
def dashboard():
#get all msgs for the user
msgs, current_msg = get_msgs_and_current_msg()
#if not current_msg:
#-->
return render_template('list/dashboard_msgs.html', msgs=msgs)
#current, completed, deleted = [], [], []
#tasks = Task.query.filter_by(parent_list=current_msg.id)
#tasks = sorted(list(tasks), key=lambda x:(
#-x.important,
#x.state=="deleted",
#x.state=="current",
#x.state=="completed",
#x.id))
#for task in tasks:
# if task.state == "current":
# current.append(task)
# elif task.state == "completed":
# completed.append(task)
# else:
# deleted.append(task)
#current = sorted(current, key=lambda x:(-x.important, x.sort_value))
#return render_template('list/dashboard_filter_all.html',
#msgs=msgs,
#current=current,
#filter="All")
#find the list with current=True
#@app.route("/find_current_msg")
#def find_current_msg(msgs):#
# for msg in msgs:
# if msg.current == True:
# return msg
# return None | 2.328125 | 2 |
program/myparser.py | peter2141/IBT | 0 | 12792214 | import ply.lex as lex
import ply.yacc as yacc
import global_var
# token definitions
tokens = (
'STRING', 'NUMBER', 'FIELD', 'GRE', 'LOE', 'EQ', 'NEQ',
)
# literals
literals = ['+', '-', '*', '/', '>', '<']
# token rules
t_FIELD = r'[a-zA-Z0-9_\.][a-zA-Z0-9_\.]*'
t_STRING = r'\".*\"'
t_GRE = '>='
t_LOE = '<='
t_EQ = '=='
t_NEQ = '!='
t_ignore = " \t"
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# invalid token handler
def t_error(t):
    print("Unsupported character '%s'" % t.value[0])
global_var.parseerror = True
t.lexer.skip(1)
# grammar
def p_inp(p):
'''inp : exp '>' exp
| exp '<' exp
| exp GRE exp
| exp LOE exp
| exp NEQ exp
| exp EQ exp'''
def p_exp(p):
'''exp : STRING
| x exp2'''
def p_exp2(p):
'''exp2 : '+' x exp2
| '-' x exp2
| '*' x exp2
| '/' x exp2
| '''
def p_x1(p):
'''x : NUMBER'''
def p_x2(p):
'''x : FIELD'''
    global_var.fields.append(p[1]) # append the field to the list of fields
# on a grammar error, print it and set the global error flag
def p_error(p):
if p:
print("Syntakticka chyba pri '%s'" % p.value)
global_var.parseerror = True
else:
print("Syntakticka chyba pri EOF")
global_var.parseerror = True
# builds the lexer and the parser
# input is an expression; returns False on a lexical or syntax error, True otherwise
def myparse(expression):
lexer = lex.lex()
    parser = yacc.yacc(debug=False) # disable the debug file
    # lexical check
lexer.input(expression)
while True:
tok = lexer.token()
        if global_var.parseerror: # if an error occurred
            global_var.parseerror = False # reset the flag
return False
if not tok:
break
    # syntax check
parser.parse(expression)
    if global_var.parseerror: # if an error occurred
        global_var.parseerror = False # reset the flag
return False
return True
| 2.65625 | 3 |
cloudcix_rest/migrations/0001_initial.py | CloudCIX/framework | 0 | 12792215 | <filename>cloudcix_rest/migrations/0001_initial.py
# Generated by Django 2.0.7 on 2018-07-09 13:50
from typing import List
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies: List[str] = []
operations = [
migrations.CreateModel(
name='APILog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(auto_now_add=True)),
('user_id', models.CharField(max_length=64)),
('api_key', models.CharField(max_length=64)),
('url', models.URLField()),
('method', models.CharField(max_length=10)),
],
),
]
| 1.992188 | 2 |
github/models.py | billryan/github-rss | 0 | 12792216 | <reponame>billryan/github-rss
from django.db import models
class Repo(models.Model):
repo_url = models.URLField(max_length=200)
owner = models.CharField(max_length=200)
repo = models.CharField(max_length=200)
def __unicode__(self):
return self.owner + '/' + self.repo
| 2.015625 | 2 |
example3.py | djinn/python-duckduckgo | 2 | 12792217 | from duckduckgo import query
def wikipedia_presence(text):
    """Return the Wikipedia article URL for a query, or None if there is none."""
    result = query(text)
    if result.abstract is not None and result.abstract.source == 'Wikipedia':
        return result.abstract.url
    return None
if __name__ == '__main__':
import sys
print wikipedia_presence(' '.join(sys.argv[1:]))
| 3.09375 | 3 |
Training_LSTM.py | monikaheinzl/basecalling-Hopfield | 0 | 12792218 | <gh_stars>0
#!/usr/bin/env python
"""
BSD 2-Clause License
Copyright (c) 2021 (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import h5py
import numpy as np
import os
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import pickle
import random
import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.backends.backend_pdf import PdfPages
import math
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from torch.utils.tensorboard import SummaryWriter
import itertools
import seaborn as sns
import pandas as pd
import argparse
from distutils.util import strtobool
from polyleven import levenshtein
sys.path.insert(0, '/basecaller-modules')
from early_stopping import EarlyStopping
from cnn import SimpleCNN, BasicBlock, SimpleCNN_res
from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool
plt.switch_backend('agg')
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
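# Range implements __eq__ as an interval test so it can serve as an argparse membership check,
# e.g. parser.add_argument('--p', type=float, choices=[Range(0.0, 1.0)]) (illustrative flag name)
# accepts any float in [0, 1].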
def make_argparser():
parser = argparse.ArgumentParser(description='Nanopore Basecaller')
parser.add_argument('-i', '--input', required = True,
help="File path to the pickle input file.")
parser.add_argument('-o', '--output', required = True,
help="Output folder name")
parser.add_argument('-g', '--gpu_port', type=int, default=1,
help="Port on GPU mode")
parser.add_argument('-s', '--set_seed', type=int, default=1234,
help="Set seed")
parser.add_argument('-b', '--batch_size', type=int, default=256,
help="Batch size")
parser.add_argument('-e', '--epochs', type=int, default=500,
help="Number of epochs")
parser.add_argument('-v', '--make_validation', type=int, default=1000,
help="Make every n updates evaluation on the validation set")
# CNN arguments
parser.add_argument("--input_bias_cnn", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)
parser.add_argument('-c', '--channel_number', type=int, default=256,
help="Number of output channels in Encoder-CNN")
parser.add_argument('-l', '--cnn_layers', type=int, default=1,
help="Number of layers in Encoder-CNN")
parser.add_argument('--pooling_type', default="None",
help="Pooling type in Encoder-CNN")
parser.add_argument('--strides', nargs='+', type=int, default=[1, 1, 1],
help="Strides in Encoder-CNN")
parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11],
help="Kernel sizes in Encoder-CNN")
parser.add_argument("--dropout_cnn", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--dropout_input", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
    parser.add_argument('--drop_prob', type=float, default=0.0, choices=[Range(0.0, 1.0)],
                        help="Dropout probability Encoder-CNN")
parser.add_argument("--batch_norm", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
# LSTM arguments
parser.add_argument("--attention", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)
    parser.add_argument('--dropout', type=float, default=0.0, choices=[Range(0.0, 1.0)],
                        help="Dropout probability Encoder-LSTM")
parser.add_argument('-u', '--hidden_units', type=int, default=256,
help="Number of hidden units in Encoder-Decoder-LSTM")
parser.add_argument('--lstm_layers', type=int, default=1,
help="Number of layers in Encoder-LSTM")
parser.add_argument('--forget_bias_encoder', type=str, default="0",
help="Set forget gate bias in Encoder-LSTM")
parser.add_argument('--forget_bias_decoder', type=str, default="0",
help="Set forget gate bias in Decoder-LSTM")
parser.add_argument("--bi_lstm", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
# teacher forcing
parser.add_argument("--reduced_tf", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
    parser.add_argument('--tf_ratio', type=float, default=1.0, choices=[Range(0.0, 1.0)],
                        help="Teacher forcing ratio. Default=1, TF on")
parser.add_argument('--weight_decay', type=float, default=0,
help="Weight decay")
parser.add_argument('--learning_rate', type=float, default=0.001,
help="Weight decay")
parser.add_argument("--reduce_lr", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--gradient_clip', default="None",
help="Gradient clipping")
# early stopping
parser.add_argument("--early_stop", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--patience', type=int, default=25,
help="Patience in early stopping")
parser.add_argument("--call", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--editD", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
return parser
# Network
# -----------
# * CNN-Encoder
# * LSTM-Encoder
# * LSTM-Decoder
# The Encoder
# -----------
class LSTMCellEncoder(nn.Module):
def __init__(self, input_size, hidden_size, bias=True):
super(LSTMCellEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)
# self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, hidden):
h, c = hidden
h = h.view(h.size(0), -1)
c = c.view(c.size(0), -1)
x = x.view(x.size(0), -1)
# Linear mappings
gates = self.i2h(x) + self.h2h(h)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = (forgetgate * c) + (ingate * cellgate)
hy = outgate * F.tanh(cy)
return hy, (hy, cy), (torch.mean(ingate).cpu(),torch.mean(forgetgate).cpu(),
torch.mean(cellgate).cpu(), torch.mean(outgate).cpu())
class LSTMencoder(nn.Module):
#Our batch shape for input x is [batch, seq_len, input_dim]
def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5,
num_layers=2, own_cell_encoder = False, bidirectional=False, port=1, dropout=0):
super(LSTMencoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.batch_size = batch_size
self.num_layers = num_layers
self.output_dim = output_dim
self.own_cell_encoder = own_cell_encoder
self.bidirectional = bidirectional
self.port = port
self.dropout=dropout
# Define the LSTM layer
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.bidirectional, dropout=self.dropout)
# self.lstm_p = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True)
def init_hidden(self):
# This is what we'll initialise our hidden state as
return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port),
torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).requires_grad_().cuda(self.port))
def forward(self, x, x_lengths):
#hidden = self.init_hidden()
# Forward pass through LSTM layer
# shape of lstm_in: [batch, seq_len, input_dim]
# shape of lstm_out: [batch, seq_len, output_dim]
# shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).
# Sort instances by sequence length in descending order
#print("in length", x_lengths)
sorted_len, sorted_idx = x_lengths.sort(0, descending=True)
index_sorted_idx = sorted_idx.view(-1, 1, 1).expand_as(x)
sorted_inputs = x.gather(0, index_sorted_idx.long()) # [batch, seq_len, input_dim]
# pack_padded_sequence so that padded items in the sequence won't be shown to the LSTM
packed_seq = nn.utils.rnn.pack_padded_sequence(sorted_inputs, sorted_len.cpu().numpy(), batch_first=True)
# Define the LSTM layer
lstm_out, hidden = self.lstm(packed_seq) # [seq_len, batch, input_dim]
# undo the packing operation
unpacked, unpacked_len = nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True)
        # sort targets or batch accordingly
# unsort the output
_, original_idx = sorted_idx.sort(0, descending=False)
# unsort hiddens
original_idx = original_idx.cpu()
unsorted_idx_hiddens = original_idx.view(1, -1, 1).expand_as(hidden[0])
unsorted_hiddens1 = hidden[0][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long())
unsorted_hiddens2 = hidden[1][:, original_idx, :]#.gather(0, unsorted_idx_hiddens.long())
hidden_original = (unsorted_hiddens1, unsorted_hiddens2)#.cpu()
unpacked_original = unpacked[original_idx, :, :]
return hidden_original, unpacked_original
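# Stand-alone sketch (toy sizes, CPU only; names below are illustrative) of the
# sort -> pack -> LSTM -> unpack -> unsort pattern the encoder uses above to hide
# padded time steps from the LSTM.
def _pack_pad_demo():
    lstm = nn.LSTM(input_size=2, hidden_size=3, batch_first=True)
    x = torch.randn(4, 6, 2)                       # [batch, seq_len, input_dim], zero-padded
    lengths = torch.tensor([6, 3, 5, 2])           # true lengths per sequence
    sorted_len, sorted_idx = lengths.sort(0, descending=True)
    packed = nn.utils.rnn.pack_padded_sequence(x[sorted_idx], sorted_len.cpu().numpy(), batch_first=True)
    out, (h, c) = lstm(packed)
    unpacked, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
    _, original_idx = sorted_idx.sort(0)           # undo the sorting
    return unpacked[original_idx], (h[:, original_idx, :], c[:, original_idx, :])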
# The Decoder
# -----------
class LSTMCellDecoder(nn.Module):
def __init__(self, input_size, hidden_size, bias=True, port=1):
super(LSTMCellDecoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.port = port
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, hidden):
h, c = hidden
h = h.view(x.size(0), -1)
c = c.view(x.size(0), -1)
x = x.view(x.size(0), -1)
# Linear mappings
gates = self.i2h(x) + self.h2h(h)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = (forgetgate * c) + (ingate * cellgate)
hy = outgate * F.tanh(cy)
#print("hy", hy.size())
hy = torch.unsqueeze(hy, 0).cuda(self.port) # (batch, 1, feature)
cy = torch.unsqueeze(cy, 0).cuda(self.port) # (batch, 1, feature)
return hy, (hy, cy), (torch.mean(ingate).cpu(), torch.mean(forgetgate).cpu(),
torch.mean(cellgate).cpu(), torch.mean(outgate).cpu())
class LSTMdecoder(nn.Module):
def __init__(self, input_dim, hidden_dim, batch_size, output_dim=5,
num_layers=2, own_cell_decoder = False, bidirectional = False, port=1, attention=True, dropout=0):
super(LSTMdecoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.batch_size = batch_size
self.num_layers = num_layers
self.output_dim = output_dim
self.own_cell_decoder = own_cell_decoder
self.bidirectional = bidirectional
self.port=port
self.attention = attention
self.dropout = dropout
# Define the LSTM layer
if self.bidirectional:
if self.attention:
self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim*2, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)
else:
self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim*2, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)
else:
if self.attention:
self.lstm_unroll = nn.LSTM(self.input_dim+self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)
else:
self.lstm_unroll = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=False, dropout=self.dropout)
# Define the output layer
if self.bidirectional:
if self.attention:
# attention
self.attn = nn.Linear(self.hidden_dim * 4, 1)
#self.attn = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2)
self.linear = nn.Linear(self.hidden_dim*2, self.output_dim)
else:
if self.attention:
# attention
self.attn = nn.Linear(self.hidden_dim * 2, 1)
#self.attn = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
self.linear = nn.Linear(self.hidden_dim, self.output_dim)
def forward(self, encoder_outputs, true_x_len, max_label_len, y, teacher_forcing_ratio, hidden,
true_y_len, y_class, random_value, mode_type, beam_width=1):
# Forward pass through LSTM layer
# shape of lstm_in: [batch, input_dim]
# shape of lstm_out: [seq_len, batch, output_dim]
# shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).
if self.bidirectional:
hidden1 = hidden[0]
hidden2 = hidden[1]
h1 = torch.cat([hidden1[0:hidden1.size(0):2], hidden1[1:hidden1.size(0):2]], 2)
h2 = torch.cat([hidden2[0:hidden2.size(0):2], hidden2[1:hidden2.size(0):2]], 2)
hidden = (h1, h2)
batch_size = true_y_len.size(0)
max_for = range(max_label_len)
outputs = torch.zeros(max_label_len, batch_size, self.output_dim).cuda(self.port) # [max. target length, batch size, hidden dim]
max_in_batch = int(max(true_y_len.cpu()))
start_seq = True
input_decoder = torch.zeros(batch_size, self.input_dim).cuda(self.port) # SOS token = 1 [batch size, SOS token]
for i in max_for:
# Stop looping if we got to the last element in the batch
if i == max_in_batch:
break
# when seq length (i) >= true seq length
if i >= true_y_len[-1].item() and len(true_y_len) > 1:
not_padded_batches = i < true_y_len # get indices of not padded sequences
true_y_len = true_y_len[not_padded_batches] # remove smallest element = last one
#print(true_y_len, true_y_len.size())
input_decoder = input_decoder[not_padded_batches, :] # get only batches that are NOT padded
h = hidden[0][:, not_padded_batches, :]
c = hidden[1][:, not_padded_batches, :]
hidden = (h, c)
label = y[not_padded_batches, :, i] # batch-i, features, seq len
y = y[not_padded_batches, :, :] # batch-i, features, seq len
encoder_outputs = encoder_outputs[not_padded_batches, :, :]
else:
label = y[:, :, i] # batch, features, seq len
if self.attention:
# ATTENTION MECHANISM
attn_weights = F.softmax(
self.attn(torch.cat((encoder_outputs[:, i, :], hidden[0][0]), dim=1)), dim=1) # encoder batch first: b,len,hidden, hidden: 2,b,hidden
attn_applied = torch.bmm(attn_weights.type(torch.FloatTensor).unsqueeze(1).cuda(self.port),
encoder_outputs[:, i, :].type(torch.FloatTensor).unsqueeze(1).cuda(self.port))
input_decoder = torch.cat((input_decoder, attn_applied.squeeze(1)), dim=1) # input_decoder: b,hidden; attn_applied: b, 1, hidden
input = torch.unsqueeze(input_decoder.type(torch.FloatTensor), 1).cuda(self.port) # (batch, seq, feature),
lstm_in_unroll, hidden = self.lstm_unroll(input, hidden) # [batch size, seq, hidden dim]
lstm_in_unroll = self.linear(lstm_in_unroll.view(-1, lstm_in_unroll.size(2))) # (batch_size*seq, out_dim)
input_decoder = F.log_softmax(lstm_in_unroll, dim=1) # (batch_size*seq, out_dim) --> (batch_size, out_dim)
#decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# put on position: seq, 0:true_batch_size, features
# rest: seq, true_batch_size:max_seq_len, features filled with 0
outputs[i, 0:input_decoder.size(0), :] = input_decoder # [max. target length, batch size, output dim]
top1 = input_decoder.argmax(1) # get argmax of prediction
#if teacher forcing, use actual next token as next input
#if not, use predicted token
onehot = torch.zeros(len(top1), self.output_dim).cuda(self.port)
onehot[:, top1] = 1 # one hot encode input
input_decoder = label if teacher_force else onehot # [batch size, out dim]
return outputs, hidden
# Sequence to sequence model
# -----------
class Seq2Seq(nn.Module):
def __init__(self, cnn_encoder, lstm_encoder, lstm_decoder):
super().__init__()
self.cnn_encoder = cnn_encoder
self.lstm_encoder = lstm_encoder
self.lstm_decoder = lstm_decoder
assert lstm_encoder.hidden_dim == lstm_decoder.hidden_dim, "Hidden dimensions of encoder and decoder must be equal!"
assert lstm_encoder.num_layers == lstm_decoder.num_layers, "Encoder and decoder must have equal number of layers!"
assert lstm_encoder.batch_size == lstm_decoder.batch_size, "Encoder and decoder must have equal batch size!"
def forward(self, inputs, seq_len, teacher_forcing_ratio, labels=None, lab_len=None,
labels10=None, max_label_len=None, mode_type="train", beam_width=1):
###########################################
##### Encoder #############################
###########################################
#### CNN
#Forward pass, backward pass, optimize
in_lstm, seq_len_cnn = self.cnn_encoder(inputs, seq_len)
in_lstm = torch.transpose(in_lstm, 1, 2) # input for LSTM batch_size x seq_length x input_size
#### LSTM
        if (seq_len_cnn <= 0.).sum().item() > 0: # remove samples with non-positive CNN output length
negative_idx = seq_len_cnn > 0
seq_len_cnn = seq_len_cnn[negative_idx]
in_lstm = in_lstm[negative_idx, : , :] # [batch, seq_len, input_dim]
lab_len = lab_len[negative_idx]
labels = labels[negative_idx, :]
labels10 = labels10[negative_idx, :, :]
encoder_hidden, ecoder_output = self.lstm_encoder(in_lstm, seq_len_cnn)
###########################################
##### Sorting #############################
###########################################
#### sort by decreasing target length
sorted_len_target, sorted_idx_target = lab_len.sort(0, descending=True)
sorted_hiddens1 = encoder_hidden[0][:, sorted_idx_target, :]
sorted_hiddens2 = encoder_hidden[1][:, sorted_idx_target, :]
sorted_hiddens = (sorted_hiddens1, sorted_hiddens2)
sorted_encoder_output = ecoder_output[sorted_idx_target, :, :]
# sort labels so that they match with order in batch
labels_sorted = labels[sorted_idx_target, :]#.gather(0, sorted_idx_lab.long())
labels10_sorted = labels10[sorted_idx_target, :, :] # batch, out_size, seq_len
###########################################
##### Decoder #############################
###########################################
#### LSTM
random_value = random.random()
out_decoder, decoder_hidden = self.lstm_decoder(
sorted_encoder_output, seq_len_cnn, max_label_len, labels10_sorted,
teacher_forcing_ratio, sorted_hiddens,
sorted_len_target, labels_sorted, random_value, mode_type, beam_width) # seq_length x batch_size x out_size
return out_decoder, labels_sorted, sorted_len_target
# In[ ]:
def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True):
my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10) # create your datset
train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size,
num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader
return(train_loader)
def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False):
my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx) # create your datset
train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size,
num_workers=0, pin_memory=False, shuffle=shuffle) # create your dataloader
return(train_loader)
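# Minimal, self-contained sketch (dummy tensors and shapes, not the real preprocessing):
# shows the batch field order that trainNet() below relies on when it unpacks
# data[0]..data[4] as (signal, labels, signal lengths, label lengths, one-hot labels).
def _loader_demo():
    n_reads, max_sig, max_lab, n_out = 8, 100, 20, 6
    x = torch.randn(n_reads, max_sig, 1)
    y = torch.randint(0, 4, (n_reads, max_lab)).float()
    y10 = torch.zeros(n_reads, n_out, max_lab)
    sig_len = torch.full((n_reads,), max_sig, dtype=torch.long)
    lab_len = torch.full((n_reads,), max_lab, dtype=torch.long)
    loader = get_train_loader_trainVal(x, sig_len, y, lab_len, y10, batch_size=4)
    for batch_x, batch_y, seq_len, label_len, batch_y10 in loader:
        print(batch_x.shape, batch_y.shape, seq_len.shape, label_len.shape, batch_y10.shape)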
def convert_to_string(pred, target, target_lengths):
import editdistance
vocab = {0: "A", 1: "C", 2: "G", 3: "T", 4: "<EOS>", 5: "<PAD>"}
editd = 0
num_chars = 0
for idx, length in enumerate(target_lengths):
length = int(length.item())
seq = pred[idx]
seq_target = target[idx]
encoded_pred = []
for p in seq:
if p == 4:
break
encoded_pred.append(vocab[int(p.item())])
encoded_pred = ''.join(encoded_pred)
encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]])
result = editdistance.eval(encoded_pred, encoded_target)
editd += result
num_chars += len(encoded_target)
return editd, num_chars
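# Tiny illustrative call (made-up tensors): index 4 is <EOS>, 5 is <PAD>. The first
# prediction reads "ACG" against reference "ACT" (1 edit), the second matches "TTA"
# exactly, so this returns (1, 6); the training loop divides such sums to obtain a
# character error rate.
def _editd_demo():
    pred = torch.tensor([[0, 1, 2, 4, 5], [3, 3, 0, 4, 5]])
    target = torch.tensor([[0, 1, 3, 5, 5], [3, 3, 0, 5, 5]])
    target_lengths = torch.tensor([3.0, 3.0])
    return convert_to_string(pred, target, target_lengths)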
def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None,
test_ds=None, batch_size=256, n_epochs=500, teacher_forcing_ratio=0.5, reduced_TF=True,
make_validation=1000, mode="train", shuffle=True, patience = 25,
file_name="model", earlyStopping=False, writer="", editD=True, reduce_lr=False):
#Print all of the hyperparameters of the training iteration:
print("===== HYPERPARAMETERS =====")
print("batch_size=", batch_size)
print("epochs=", n_epochs)
print("gradient clipping=", clipping_value)
print("teacher forcing ratio=", teacher_forcing_ratio)
print("shuffle=", shuffle)
if val_ds is not None:
input_x_val = val_ds[0]
input_y_val = val_ds[1]
input_y10_val = val_ds[2]
signal_len_val = val_ds[3]
label_len_val = val_ds[4]
read_val = val_ds[5]
input_x = train_ds[0]
input_y = train_ds[1]
input_y10 = train_ds[2]
signal_len = train_ds[3]
label_len = train_ds[4]
read_train = train_ds[5]
#Get training data
train_loader = get_train_loader_trainVal(input_x, signal_len,
input_y, label_len,
input_y10, batch_size=batch_size, shuffle=True)
if val_ds != None:
val_loader = get_train_loader_trainVal(input_x_val, signal_len_val,
input_y_val, label_len_val,
input_y10_val, batch_size=batch_size, shuffle=True)
if earlyStopping:
# initialize the early_stopping object
early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=reduce_lr)
dict_activations_in = {}
dict_activations_forget = {}
dict_activations_cell = {}
dict_activations_out = {}
dict_activations_in_decoder = {}
dict_activations_forget_decoder = {}
dict_activations_cell_decoder = {}
dict_activations_out_decoder = {}
dict_training_loss = {}
dict_validation_loss = {}
dict_training_acc = {}
dict_validation_acc = {}
dict_training_editd = {}
dict_validation_editd = {}
dict_training_loss2 = {}
dict_validation_loss2 = {}
dict_training_acc2 = {}
dict_validation_acc2 = {}
dict_training_editd2 = {}
dict_validation_editd2 = {}
dict_weights = {}
dict_gradients = {}
running_loss_train = 0.0
running_loss_val = 0.0
running_acc_train = 0.0
running_acc_val = 0.0
running_editd_train = 0.0
running_editd_val = 0.0
updates = 0
heatmap_g = None
heatmap_w = None
heatmap_g_b = None
heatmap_w_b = None
counter_updates_teacherForcing = 0
old_ed = 0
if reduce_lr:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
#Loop for n_epochs
for epoch in range(n_epochs):
if earlyStopping and early_stopping.early_stop: # break epoch loop
print("Early stopping")
break
model.train()
epoch_loss = 0
epoch_acc = 0
epoch_loss_val = 0
epoch_acc_val = 0
epoch_editd_val = 0
epoch_editd = 0
print("=" * 30)
print("epoch {}/{}".format(epoch+1, n_epochs))
print("=" * 30)
total_train_loss = 0
loss_iteration = []
acc_iteration = []
editd_iteration = []
for iteration, data in enumerate(train_loader):
model.train()
#Set the parameter gradients to zero
optimizer.zero_grad()
batch_x = data[0]
batch_y = data[1]
seq_len = data[2]
lab_len = data[3]
batch_y10 = data[4]
#Wrap them in a Variable object
inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length
output, sorted_labels, sorted_labels_len = model(inputs, seq_len, teacher_forcing_ratio, labels,
lab_len, labels10, labels.size(1), mode)
output = torch.transpose(output, 0, 1).contiguous() # input for LSTM seq_length x out_size
# Calculate cross entropy loss
# output = (seq*batch, out dim), target = (seq*batch)
            # do not one-hot encode the target
reshaped_output = output.view(-1, output.size(2))
reshaped_sorted_labels = sorted_labels.view(-1)
notpadded_index = reshaped_sorted_labels != 5 # indices of not padded elements
loss = criterion(reshaped_output, reshaped_sorted_labels.long())
loss_iteration.append(loss.item())
epoch_loss += loss.item()
running_loss_train += loss.item()
acc = (reshaped_output[notpadded_index, :].argmax(1) ==
reshaped_sorted_labels[notpadded_index]
).sum().item() / reshaped_sorted_labels[notpadded_index].size(0)
epoch_acc += acc
running_acc_train += acc
acc_iteration.append(acc)
#if editD:
# if updates % make_validation == 0:
# ed = np.mean(np.array(convert_to_string(output.argmax(2), sorted_labels, sorted_labels_len)))
# ed2 = ed
# else:
# ed = 0
# ed2 = old_ed
#
# old_ed = ed2
# epoch_editd += ed
# running_editd_train += ed
# editd_iteration.append(ed2)
# print("edit distance= {0:.4f}".format((epoch_editd / float(iteration + 1))))
if updates % make_validation == 0:
print("=" * 30)
print("batch {} in epoch {}/{}".format(iteration+1, epoch+1, n_epochs))
print("=" * 30)
print("loss= {0:.4f}".format(epoch_loss / float(iteration + 1)))
print("acc= {0:.4f} %".format((epoch_acc / float(iteration + 1)) * 100))
if reduce_lr:
print("lr= " + str(optimizer.param_groups[0]['lr']))
print("teacher forcing ratio= {}".format(teacher_forcing_ratio), ", update= ", updates, ", half of updates= ", int((len(train_loader) * n_epochs)*0.5))
# Backward pass
loss.backward()
#clipping_value = 1 #arbitrary number of your choosing
if clipping_value != "None":
nn.utils.clip_grad_norm_(model.parameters(), int(clipping_value))
# Update encoder and decoder
optimizer.step()
if (val_ds != None) and (updates % make_validation == 0): # or (updates == n_epochs-1)):
if reduced_TF:
#if updates > int((len(train_loader) * n_epochs)*0.5) and teacher_forcing_ratio >= 0.5:
if (running_acc_train / float(make_validation)) > 0.35 and teacher_forcing_ratio >= 0.25:
# if we have reached half of the updates
teacher_forcing_ratio = teacher_forcing_ratio * 0.95
else:
teacher_forcing_ratio
# Evaluation on the validation set
val_losses = []
val_acc = []
val_editd = []
model.eval()
total_ed = 0
total_num_chars = 0
with torch.no_grad():
for iteration_val, data_val in enumerate(val_loader):
batch_x_val = data_val[0]
batch_y_val = data_val[1]
seq_len_val = data_val[2]
lab_len_val = data_val[3]
batch_y10_val = data_val[4]
inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False)
# batch_size x out_size x seq_length
output_val, sorted_labels_val, sorted_labels_len_val = model(inputs_val, seq_len_val,
0, labels_val, lab_len_val,
labels10_val, labels_val.size(1), mode)
output_val = torch.transpose(output_val, 0, 1).contiguous() # input for LSTM seq_length x out_size
# Calculate cross entropy loss
# output = (seq*batch, out dim), target = (seq*batch)
                        # do not one-hot encode the target
reshaped_output_val = output_val.view(-1, output_val.size(2))
reshaped_sorted_labels_val = sorted_labels_val.view(-1)
notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not padded elements
loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long())
val_losses.append(loss_val.item())
epoch_loss_val += loss_val.item()
running_loss_val += loss_val.item()
acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) ==
reshaped_sorted_labels_val[notpadded_index_val]
).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0)
epoch_acc_val += acc_val
running_acc_val += acc_val
val_acc.append(acc_val)
if editD:
ed_val, num_char_ref = convert_to_string(output_val.argmax(2), sorted_labels_val, sorted_labels_len_val)
epoch_editd_val += ed_val
running_editd_val += ed_val
val_editd.append(ed_val)
total_ed += ed_val
total_num_chars += num_char_ref
if editD:
cer = float(total_ed) / total_num_chars
if updates == 0:
writer.add_scalar('Loss/train', np.mean(loss_iteration), updates)
writer.add_scalar('Loss/validation', np.mean(val_losses), updates)
writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates)
writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates)
if editD:
#writer.add_scalar('Edit Distance/train', running_editd_train, updates)
writer.add_scalar('Edit Distance/validation', cer, updates)
#dict_training_editd2[updates] = running_editd_train
dict_validation_editd2[updates] = cer
dict_training_loss2[updates] = np.mean(loss_iteration)
                            dict_training_acc2[updates] = np.mean(acc_iteration)
                            dict_validation_loss2[updates] = np.mean(val_losses)
dict_validation_acc2[updates] = np.mean(val_acc)
else:
writer.add_scalar('Loss/train', np.mean(loss_iteration), updates)
writer.add_scalar('Loss/validation', np.mean(val_losses), updates)
writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates)
writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates)
if editD:
#writer.add_scalar('Edit Distance/train', running_editd_train, updates)
writer.add_scalar('Edit Distance/validation', cer, updates)
#dict_training_editd2[updates] = running_editd_train #/ float(make_validation)
dict_validation_editd2[updates] = cer
dict_training_loss2[updates] = np.mean(loss_iteration)
                            dict_training_acc2[updates] = np.mean(acc_iteration)
                            dict_validation_loss2[updates] = np.mean(val_losses)
dict_validation_acc2[updates] = np.mean(val_acc)
valid_loss = running_loss_val / float(iteration_val + 1)
running_loss_train = 0.0
running_loss_val = 0.0
running_acc_train = 0.0
running_acc_val = 0.0
running_editd_train = 0.0
running_editd_val = 0.0
print("=" * 100)
print("Epoch: {}/{}...".format(epoch+1, n_epochs),
"Loss: {:.6f}...".format(epoch_loss / float(iteration + 1)),
"Accuarcy: {:.6f}...".format((epoch_acc / float(iteration + 1)) * 100),
"Val Loss: {:.6f}...".format(epoch_loss_val / float(iteration_val + 1)),
"Val Accuracy: {:.6f}%...".format((epoch_acc_val / float(iteration_val + 1)) * 100))
print("=" * 100)
dict_validation_loss[epoch] = val_losses
dict_validation_acc[epoch] = val_acc
if editD:
dict_validation_editd[epoch] = val_editd
                    # early_stopping needs the validation loss to check if it has decreased,
# and if it has, it will make a checkpoint of the current model
if earlyStopping:
early_stopping(np.mean(val_losses), model, optimizer, updates)
if early_stopping.early_stop:
print("Early stopping")
break
if reduce_lr:
scheduler.step(loss_val)
updates +=1
dict_training_loss[epoch] = loss_iteration
dict_training_acc[epoch] = acc_iteration
if editD:
dict_training_editd[epoch] = editd_iteration
writer.close()
if earlyStopping:
checkpoint = torch.load(file_name)
model.load_state_dict(checkpoint["model"])
return ([[dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd],
[dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2],
[dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out],
[dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder],
[dict_weights, dict_gradients]])
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def init_weights_orthogonal_lstm(m):
for name, param in m.named_parameters():
if "weight" in name and len(list(param.data.size())) > 1:
nn.init.orthogonal_(param.data)
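# Usage sketch (illustrative): model.lstm_encoder.apply(init_weights_orthogonal_lstm)
# re-initialises every 2-D "weight" parameter orthogonally and leaves biases untouched.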
def plot_error_accuarcy(input, pdf=None, steps=50, validation=True, editD=True):
sns.set(font_scale=1)
loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5]
fig = plt.figure(figsize=(18,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 3, 1)
ax.plot(list(loss_train.keys()), list(loss_train.values()), label="training error")
if validation:
ax.plot(list(loss_val.keys()), list(loss_val.values()), label="validation error")
plt.xlabel("Updates")
plt.ylabel("Error")
plt.legend()
plt.title("Error vs. updates")
ax = fig.add_subplot(1, 3, 2)
ax.plot(list(acc_train.keys()), [v*100 for v in list(acc_train.values())], label="training accuracy")
if validation:
ax.plot(list(acc_val.keys()), [v*100 for v in list(acc_val.values())], label="validation accuracy")
plt.xlabel("Updates")
plt.ylabel("Accuracy in %")
plt.legend()
plt.title("Accuracy vs. updates")
if editD:
ax = fig.add_subplot(1, 3, 3)
ax.plot(list(editd_train.keys()), list(editd_train.values()), label="training edit distance")
if validation:
ax.plot(list(editd_val.keys()), list(editd_val.values()), label="validation edit distance")
plt.xlabel("Updates")
plt.ylabel("Normalized Edit Distance")
plt.legend()
plt.title("Edit Distance vs. updates")
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_error_accuarcy_iterations_train(input, pdf=None, editD=True):
sns.set(font_scale=1)
loss_train, acc_train, editd_train = input[0], input[2], input[4]
fig = plt.figure(figsize=(18,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 3, 1)
ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_train.values()))))),
np.concatenate(np.array(list(loss_train.values()))), label="training error")
plt.xlabel("Updates")
plt.ylabel("Error")
plt.legend()
plt.title("Error vs. updates from trainings set")
ax = fig.add_subplot(1, 3, 2)
ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_train.values()))))),
[v*100 for v in np.concatenate(np.array(list(acc_train.values())))], label="training accuracy")
plt.xlabel("Updates")
plt.ylabel("Accuracy in %")
plt.legend()
plt.title("Accuracy vs. updates from trainings set")
if editD:
ax = fig.add_subplot(1, 3, 3)
ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_train.values()))))),
np.concatenate(np.array(list(editd_train.values()))), label="training edit distance")
plt.xlabel("Updates")
plt.ylabel("Normalized Edit Distance")
plt.legend()
plt.title("Edit Distance vs. updates from trainings set")
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_error_accuarcy_iterations_val(input, pdf=None, editD=True):
sns.set(font_scale=1)
loss_val, acc_val, editd_val = input[1], input[3], input[5]
fig = plt.figure(figsize=(18,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 3, 1)
ax.plot(np.arange(0, len(np.concatenate(np.array(list(loss_val.values()))))),
np.concatenate(np.array(list(loss_val.values()))), label="validation error")
plt.xlabel("Updates")
plt.ylabel("Error")
plt.legend()
plt.title("Error vs. updates from validation set")
ax = fig.add_subplot(1, 3, 2)
ax.plot(np.arange(0, len(np.concatenate(np.array(list(acc_val.values()))))),
[v*100 for v in np.concatenate(np.array(list(acc_val.values())))], label="validation accuracy")
plt.xlabel("Updates")
plt.ylabel("Accuracy in %")
plt.legend()
plt.title("Accuracy vs. updates from validation set")
if editD:
ax = fig.add_subplot(1, 3, 3)
ax.plot(np.arange(0, len(np.concatenate(np.array(list(editd_val.values()))))),
np.concatenate(np.array(list(editd_val.values()))), label="validation edit distance")
plt.xlabel("Updates")
plt.ylabel("Normalized Edit Distance")
plt.legend()
plt.title("Edit distance vs. updates from validation set")
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_activations(input, pdf=None, print_epoch=50000, title=""):
sns.set(font_scale=1)
fig = plt.figure(figsize=(13,4))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.5)
i = 0
for p, label in zip(input, ["input gate", "forget gate", "cell activation", "out gate"]):
i += 1
ax = fig.add_subplot(2, 2, i)
for epoch in p.keys():
if epoch % print_epoch == 0 or epoch == max(p.keys()):
x = np.arange(0, len(p[epoch].detach().numpy()))
# this locator puts ticks at regular intervals
if epoch == 0:
ax.plot(np.arange(0, len(p[epoch].detach().numpy())),
p[epoch].detach().numpy(), label="update {}".format(epoch), color="#000000", alpha=0.8)
else:
ax.plot(np.arange(0, len(p[epoch].detach().numpy())),
p[epoch].detach().numpy(), label="update {}".format(epoch))
plt.xlabel("Time Steps")
plt.ylabel("Activation")
if i == 1:
plt.legend(bbox_to_anchor=(1.05, 1.05))
plt.title("{} {}".format(label, title))
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_heatmap(dict_weights,save_files_path, filename, split_LSTMbiases = False):
x_len = list(dict_weights.keys())
y_len = list(dict_weights[x_len[0]].keys())
input = np.zeros((len(y_len), len(x_len)))
input_grad = np.zeros((len(y_len), len(x_len)))
if split_LSTMbiases:
y_len_biases = []
for name in y_len:
if "bias" in name and "lstm" in name and "linear" not in name:
for b in ["input", "forget", "cell", "output"]:
y_len_biases.append(name + "." + b)
input_biases = np.zeros((len(y_len_biases), len(x_len)))
input_grad_biases = np.zeros((len(y_len_biases), len(x_len)))
for idx, u in enumerate(x_len):
idx_b = 0
matrix_param = dict_weights[u]
for idx_p, p in enumerate(y_len):
if len(matrix_param[p].shape) > 2: # and matrix_param[p].shape[1] == 1: # (256, 1, 11)
m = matrix_param[p].reshape((matrix_param[p].shape[0], -1))
else:
m = matrix_param[p]
input[idx_p, idx] = np.linalg.norm(m, ord=2)
if split_LSTMbiases and "bias" in p and "lstm" in p and "linear" not in p:
n = matrix_param[p].shape[0]
# input gate
start, end = 0, n//4
input_biases[idx_b, idx] = np.linalg.norm(m[start: end], ord=2)
# forget gate
start, end = n//4, n//2
input_biases[idx_b+1, idx] = np.linalg.norm(m[start: end], ord=2)
# cell gate
start, end = n//2, n//2 + n//4
input_biases[idx_b+2, idx] = np.linalg.norm(m[start: end], ord=2)
# output gate
start, end = n//2 + n//4, n
input_biases[idx_b+3, idx] = np.linalg.norm(m[start: end], ord=2)
idx_b += 4
y_len = ["\n".join([".".join([x.split(".")[0], x.split(".")[1]]), x.split(".")[2]]) for x in y_len]
df = pd.DataFrame(input, index=y_len, columns=x_len)
print(df.head())
sns.set(font_scale=0.4)
svm = sns.heatmap(df, linewidths=0.0, edgecolor="none")
figure = svm.get_figure()
figure.savefig(save_files_path + "/heatmap_{}.pdf".format(filename))
plt.clf()
if split_LSTMbiases:
y_len_biases = ["\n".join([".".join([x.split(".")[0], x.split(".")[3]]), x.split(".")[2]]) for x in y_len_biases]
df_b = pd.DataFrame(input_biases, index=y_len_biases, columns=x_len)
print(df_b.head())
sns.set(font_scale=0.4)
svm = sns.heatmap(df_b, linewidths=0.0, edgecolor="none")
figure2 = svm.get_figure()
figure2.savefig(save_files_path + "/heatmap_{}_biases.pdf".format(filename))
plt.clf()
def bestPerformance2File(input, fname, editD=True):
loss_train, loss_val, acc_train, acc_val, editd_train, editd_val = input[0], input[1], input[2], input[3], input[4], input[5]
max_idx_train = max(acc_train, key=lambda k: acc_train[k])
max_idx_val = max(acc_val, key=lambda k: acc_val[k])
f = open(fname, "w")
if editD:
f.write("best performances on trainings set\n")
f.write("trainings acc\tvalidation acc\ttrainings loss\tvalidation loss\ttrainings edit distance\tvalidation edit distance\tupdate\n")
f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], editd_train[max_idx_train], editd_val[max_idx_train], max_idx_train))
f.write("\nbest performances on validation set\n")
f.write("trainings acc\tvalidation acc\ttrainings loss\tvalidation loss\ttrainings edit distance\tvalidation edit distance\tupdate\n")
f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], editd_train[max_idx_val], editd_val[max_idx_val], max_idx_val))
else:
f.write("best performances on trainings set\n")
f.write("trainings acc\tvalidation acc\ttrainings loss\tvalidation loss\tupdate\n")
f.write("{}\t{}\t{}\t{}\t{}\n".format(acc_train[max_idx_train], acc_val[max_idx_train], loss_train[max_idx_train], loss_val[max_idx_train], max_idx_train))
f.write("\nbest performances on validation set\n")
f.write("trainings acc\tvalidation acc\ttrainings loss\tvalidation loss\tupdate\n")
f.write("{}\t{}\t{}\t{}\t{}\n".format(acc_train[max_idx_val], acc_val[max_idx_val], loss_train[max_idx_val], loss_val[max_idx_val], max_idx_val))
f.close()
def basecalling(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
infile = args.input
fname = args.output
port = args.gpu_port
SEED = args.set_seed
batch = args.batch_size
epochs = args.epochs
make_validation = args.make_validation
teacher = args.tf_ratio
reduced_TF = args.reduced_tf
earlyStopping = args.early_stop
patience_earlyStop = args.patience
    weight_decay = args.weight_decay  # e.g. 0.01
clipping_value = args.gradient_clip
# LSTM
hidden = args.hidden_units #256
forget_bias = args.forget_bias_encoder
forget_bias_decoder = args.forget_bias_decoder
num_layers = args.lstm_layers
bidir = args.bi_lstm
attention = args.attention
dropout = args.dropout
# CNN
input_bias_cnn = args.input_bias_cnn
strides = args.strides
kernel = args.kernel
cnn_out = args.channel_number #256
pooling_type = args.pooling_type #"average"
n_layers_cnn = args.cnn_layers
batch_normalization = args.batch_norm
dropout_on = args.dropout_cnn
dropout_input = args.dropout_input
dropout_probability = args.drop_prob
call = args.call
lr = args.learning_rate
editD = args.editD
sgd = False
out_classes = 5
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# Load data
dict_classes = {0: "A", 1: "C", 2: "G", 3: "T", 4: "<EOF>", 5: "<PAD>"} # A=0, C=1, G=2, T=3, EOF=4
    script_dir = os.path.dirname(os.path.realpath(__file__))  # script directory
file_out = script_dir + "/" + infile
print(file_out)
with open(file_out, 'rb') as handle:
read_data = pickle.load(handle)
save_files_path = script_dir + '/training_result_{}/'.format(fname)
writer = SummaryWriter(script_dir + '/training_result_{}'.format(fname))
train_set = read_data[0]
val_set = read_data[1]
train_set = [train_set[0].cuda(port), train_set[1].cuda(port), train_set[2].cuda(port),
train_set[3].cuda(port), train_set[4].cuda(port), train_set[5].cuda(port)]
val_set = [val_set[0].cuda(port), val_set[1].cuda(port), val_set[2].cuda(port),
val_set[3].cuda(port), val_set[4].cuda(port), val_set[5].cuda(port)]
print("train: ", train_set[0].size(), train_set[1].size(), train_set[2].size(),
train_set[3].size(), train_set[4].size(), train_set[5].size())
print("validation: ", val_set[0].size(), val_set[1].size(), val_set[2].size(),
val_set[3].size(), val_set[4].size(), val_set[5].size())
# [batch size] is typically chosen between 1 and a few hundreds, e.g. [batch size] = 32 is a good default value
CNN = SimpleCNN(input_channel=1, output_channel=[cnn_out,cnn_out, cnn_out] , kernel_size=kernel,
stride=strides, padding=[0,0,0], pooling=pooling_type, layers=n_layers_cnn,
batch_norm=batch_normalization,
dropout = dropout_on, dropout_p = dropout_probability, dropout_input = dropout_input, input_bias_cnn=input_bias_cnn)
out_channels = CNN.output_channel[n_layers_cnn-1]
lstm = LSTMencoder(input_dim=out_channels, hidden_dim = hidden,
batch_size=batch, output_dim=hidden,
num_layers=num_layers, own_cell_encoder=False, bidirectional = bidir, port=port, dropout=dropout)
lstm_dec = LSTMdecoder(input_dim=out_classes, hidden_dim = hidden,
batch_size=batch, output_dim=out_classes,
num_layers=num_layers, own_cell_decoder=False, bidirectional = bidir, port=port, attention=attention, dropout=dropout)
model12 = Seq2Seq(cnn_encoder = CNN, lstm_encoder = lstm, lstm_decoder = lstm_dec)#.cuda(port)
model12.apply(init_weights_orthogonal_lstm)
for name, param in model12.named_parameters():
if "bias" in name:
if forget_bias != "None" and "lstm_encoder" in name:
print(name,param.data.size())
n = param.size(0)
# forget gate
start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate
param.data[start:end].fill_(float(int(forget_bias)))
print(start, end)
# ingate
start, end = 0, n//4 # ordering ingate, forgetgate, cellgate, outgate
param.data[start:end].fill_(0.)
print(start, end)
# cellgate, outgate
start, end = n//2, n # ordering ingate, forgetgate, cellgate, outgate
param.data[start:end].fill_(0.)
print(start, end)
if forget_bias_decoder != "None" and "lstm_decoder" in name and "linear" not in name:
print(name,param.data.size())
n = param.size(0)
# forget gate
start, end = n//4, n//2 # ordering ingate, forgetgate, cellgate, outgate
param.data[start:end].fill_(float(int(forget_bias_decoder)))
print(start, end)
# ingate
start, end = 0, n//4 # ordering ingate, forgetgate, cellgate, outgate
param.data[start:end].fill_(0.)
print(start, end)
# cellgate, outgate
start, end = n//2, n # ordering ingate, forgetgate, cellgate, outgate
param.data[start:end].fill_(0.)
print(start, end)
model12 = model12.cuda(port)
print(model12, next(model12.parameters()).is_cuda)
if sgd:
optimizer = optim.SGD(model12.parameters(), lr=lr, momentum=0)
else:
optimizer = optim.Adam(model12.parameters(), lr=lr, weight_decay=weight_decay, betas=(0.9,0.999))
criterion = torch.nn.NLLLoss(ignore_index=5)#.cuda(port)
print(f'The model has {count_parameters(model12):,} trainable parameters')
f = open(save_files_path + "{}_sig_length.txt".format(fname), "w")
f.write(infile + " \n")
f.write("Training: \nSignal:\n")
f.write("{}\ttrue signal length: {}\n".format(train_set[0].size(), train_set[3]))
f.write("\nTarget:\n")
f.write("{}\ttrue target length: {}\n".format(train_set[1].size(), train_set[4]))
f.write("\nRead idx:\n")
f.write("{}\n\n".format(train_set[5]))
f.write("Validation: \nSignal:\n")
f.write("{}\ttrue signal length: {}\n".format(val_set[0].size(), val_set[3]))
f.write("\nTarget:\n")
f.write("{}\ttrue target length: {}\n".format(val_set[1].size(), val_set[4]))
f.write("\nRead idx:\n")
f.write("{}\n\n".format(val_set[5]))
f.write("Model:\n")
f.write(str(model12))
f.write("\nThe model has {:,} trainable parameters\n".format(count_parameters(model12)))
f.write("hyperparameters:\n")
f.write("epochs={}, batch size={}, earlyStopping={}, patience={}, weight decay={}, clipping value={}, lr={}\n"
.format(epochs, batch, earlyStopping, patience_earlyStop, weight_decay, clipping_value, lr))
f.write("TF={}, reduced TF ratio={}\n".format(teacher, reduced_TF))
f.write("forget gate bias encoder={}, forget gate bias decoder={}"
.format(forget_bias, forget_bias_decoder))
f.close()
# with 10 reads, kernel size = 11
start = time.time()
out12 = trainNet(
model12, train_ds = train_set, optimizer=optimizer,
criterion=criterion, clipping_value=clipping_value,
val_ds = val_set,
batch_size=batch, n_epochs=epochs, teacher_forcing_ratio=teacher,
make_validation=make_validation, file_name=save_files_path + "{}_checkpoint.pt".format(fname),
earlyStopping=earlyStopping, patience=patience_earlyStop, writer=writer, editD=editD)
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("=" * 100)
checkpoint = {
'updates': out12[-1],
'model': model12.state_dict(),
        'optimizer': optimizer.state_dict()}
torch.save(checkpoint, save_files_path + '{}.pt'.format(fname))
#np.savez_compressed(save_files_path + '{}_weightsGradients.npz'.format(fname), weights=out12[4][0], gradients=out12[4][1])
pickle.dump(out12, open(save_files_path + "{}.p".format(fname), "wb" ))
with PdfPages(save_files_path + "{}.pdf".format(fname)) as pdf:
plot_error_accuarcy(out12[1], pdf, editD=editD)
bestPerformance2File(out12[1], save_files_path + "best_performances_{}.txt".format(fname), editD=editD)
plot_error_accuarcy_iterations_train(out12[0], pdf, editD=editD)
plot_error_accuarcy_iterations_val(out12[0], pdf, editD=editD)
print("Training took: {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
sys.exit(basecalling(sys.argv))
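# Example invocation (sketch only -- the real flag names live in make_argparser(),
# which is defined elsewhere, so the options below are assumptions inferred from
# the attribute names read in basecalling()):
#   python basecalling.py --input reads.p --output run1 --gpu_port 0 \
#       --batch_size 32 --epochs 100 --learning_rate 1e-4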
| 1.390625 | 1 |
interview/migrations/0004_auto_20200829_1018.py | Jacklovely/django | 63 | 12792219 | # Generated by Django 3.1 on 2020-08-29 02:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('interview', '0003_auto_20200828_2215'),
]
operations = [
migrations.AlterModelOptions(
name='candidate',
options={'permissions': [('export', 'Can export candidate list'), ('notify', 'notify interviewer for candidate review')], 'verbose_name': '应聘者', 'verbose_name_plural': '应聘者'},
),
]
| 1.59375 | 2 |
testbook/reference.py | loichuder/testbook | 291 | 12792220 | <reponame>loichuder/testbook
from .exceptions import (
TestbookExecuteResultNotFoundError,
TestbookAttributeError,
TestbookSerializeError,
TestbookRuntimeError
)
from .utils import random_varname
from .translators import PythonTranslator
class TestbookObjectReference:
def __init__(self, tb, name):
self.tb = tb
self.name: str = name
@property
def _type(self):
return self.tb.value(f"type({self.name}).__name__")
def __repr__(self):
return repr(self.tb.value(f"repr({self.name})"))
def __getattr__(self, name):
if self.tb.value(f"hasattr({self.name}, '{name}')"):
return TestbookObjectReference(self.tb, f"{self.name}.{name}")
raise TestbookAttributeError(f"'{self._type}' object has no attribute {name}")
def __eq__(self, rhs):
return self.tb.value(
"{lhs} == {rhs}".format(lhs=self.name, rhs=PythonTranslator.translate(rhs))
)
def __len__(self):
return self.tb.value(f"len({self.name})")
def __iter__(self):
iterobjectname = f"___iter_object_{random_varname()}"
self.tb.inject(f"""
{iterobjectname} = iter({self.name})
""")
return TestbookObjectReference(self.tb, iterobjectname)
def __next__(self):
try:
return self.tb.value(f"next({self.name})")
except TestbookRuntimeError as e:
if e.eclass is StopIteration:
raise StopIteration
else:
raise
def __getitem__(self, key):
try:
return self.tb.value(f"{self.name}.__getitem__({PythonTranslator.translate(key)})")
except TestbookRuntimeError as e:
if e.eclass is TypeError:
raise TypeError(e.evalue)
elif e.eclass is IndexError:
raise IndexError(e.evalue)
else:
raise
def __setitem__(self, key, value):
try:
return self.tb.inject("{name}[{key}] = {value}".format(
name=self.name,
key=PythonTranslator.translate(key),
value=PythonTranslator.translate(value)
), pop=True)
except TestbookRuntimeError as e:
if e.eclass is TypeError:
raise TypeError(e.evalue)
elif e.eclass is IndexError:
raise IndexError(e.evalue)
else:
raise
def __contains__(self, item):
return self.tb.value(f"{self.name}.__contains__({PythonTranslator.translate(item)})")
def __call__(self, *args, **kwargs):
code = self.tb._construct_call_code(self.name, args, kwargs)
try:
return self.tb.value(code)
except TestbookExecuteResultNotFoundError:
# No return value from function call
pass
except TestbookSerializeError as e:
return TestbookObjectReference(self.tb, e.save_varname)
def resolve(self):
return self.tb.value(self.name)
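# Usage sketch (illustration only; assumes a notebook "notebook.ipynb" defining a
# function `add`, and requires the testbook package -- not runnable on its own):
#
#   from testbook import testbook
#
#   @testbook("notebook.ipynb", execute=True)
#   def test_add(tb):
#       add = tb.ref("add")      # non-serializable objects come back as
#                                # TestbookObjectReference proxies
#       assert add(1, 2) == 3    # __call__ above forwards the call to the kernel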
| 2.28125 | 2 |
wavenet_tf/train.py | binh-vu/p699 | 0 | 12792221 | """Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import argparse
import glob
import json
from dataclasses import dataclass
import numpy as np
import os
import sys
import time
from datetime import datetime
from pathlib import Path
import tensorflow as tf
from tensorflow.python.client import timeline
from tqdm.auto import tqdm
from wavenet_tf import WaveNetModel, optimizer_factory
from wavenet_tf.data_io import get_train_dataset
ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
@dataclass
class TrainParams:
data_dir: str = str(ROOT_DIR / 'data' / 'fma_small_25_16000')
log_dir: str = str(ROOT_DIR / "logdir")
checkpoint_every: int = 1000
num_steps: int = int(1e5)
batch_size: int = 1
sample_size: int = 100000
learning_rate: float = 1e-4
max_to_keep: int = 5
store_metadata: bool = False
l2_regularization_strength: float = 0.0
max_checkpoints: int = 5
def train(args: TrainParams, net, optimizer):
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
dataset, n_examples = get_train_dataset(os.path.join(args.data_dir, "*.npz"), args.sample_size)
epoch_size = n_examples // args.batch_size
dataset = dataset.repeat().batch(args.batch_size)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
audio_batch = iterator.get_next()
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
loss = net.loss(input_batch=audio_batch,
global_condition_batch=None,
l2_regularization_strength=args.l2_regularization_strength)
trainable = tf.compat.v1.trainable_variables()
optim = optimizer.minimize(loss, var_list=trainable)
# Set up logging for TensorBoard.
writer = tf.compat.v1.summary.FileWriter(args.log_dir)
writer.add_graph(tf.compat.v1.get_default_graph())
run_metadata = tf.compat.v1.RunMetadata()
summaries = tf.compat.v1.summary.merge_all()
# Set up session
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=False))
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
sess.run(iterator.initializer)
# Saver for storing checkpoints of the model.
saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables(), max_to_keep=args.max_checkpoints)
try:
saved_global_step = load(saver, sess, args.log_dir)
if saved_global_step is None:
# The first training step will be saved_global_step + 1,
# therefore we put -1 here for new or overwritten trainings.
saved_global_step = -1
except:
print("Something went wrong while restoring checkpoint. "
"We will terminate training to avoid accidentally overwriting "
"the previous model.")
raise
step = None
last_saved_step = saved_global_step
try:
total = args.num_steps - saved_global_step - 1
pbar = tqdm(
total=total,
initial=saved_global_step + 1,
desc=f'train (epoch-size={epoch_size}, #epoch={total // epoch_size})')
for step in range(saved_global_step + 1, args.num_steps):
if args.store_metadata and step % 50 == 0:
# Slow run that stores extra information for debugging.
print('Storing metadata')
                run_options = tf.compat.v1.RunOptions(
                    trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
summary, loss_value, _ = sess.run(
[summaries, loss, optim],
options=run_options,
run_metadata=run_metadata)
writer.add_summary(summary, step)
writer.add_run_metadata(run_metadata,
'step_{:04d}'.format(step))
tl = timeline.Timeline(run_metadata.step_stats)
timeline_path = os.path.join(args.log_dir, 'timeline.trace')
with open(timeline_path, 'w') as f:
f.write(tl.generate_chrome_trace_format(show_memory=True))
else:
summary, loss_value, _ = sess.run([summaries, loss, optim])
writer.add_summary(summary, step)
pbar.update(1)
pbar.set_postfix(step=step, loss=loss_value, epoch=step // epoch_size)
if step > 0 and step % args.checkpoint_every == 0:
save(saver, sess, args.log_dir, step)
last_saved_step = step
except KeyboardInterrupt:
# Introduce a line break after ^C is displayed so save message
# is on its own line.
print()
finally:
        if step is not None and step > last_saved_step:
save(saver, sess, args.log_dir, step)
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
if __name__ == '__main__':
args = TrainParams()
with open('./data/tf_wavenet_params.json', 'r') as f:
wavenet_params = json.load(f)
model = WaveNetModel(
batch_size=args.batch_size,
dilations=wavenet_params["dilations"],
filter_width=wavenet_params["filter_width"],
residual_channels=wavenet_params["residual_channels"],
dilation_channels=wavenet_params["dilation_channels"],
skip_channels=wavenet_params["skip_channels"],
quantization_channels=wavenet_params["quantization_channels"],
use_biases=wavenet_params["use_biases"],
scalar_input=wavenet_params["scalar_input"],
initial_filter_width=wavenet_params["initial_filter_width"],
histograms=False,
global_condition_channels=None,
global_condition_cardinality=None)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4,
epsilon=1e-4)
train(args, model, optimizer)
| 2.640625 | 3 |
project-django/cry_pragati/usermodel/serializer.py | anuragrawat19/django_restframework | 0 | 12792222 | <reponame>anuragrawat19/django_restframework
'''Serializers for the models, so that the state of model objects can be
converted into native Python datatypes that can easily be rendered into
JSON or XML.'''
from rest_framework import serializers
from .models import CarBrands, EmployeeDesignations, Employees, Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Persons, PersonTasks
# creating a serializer for Snippet model
# SnippetSerializer using 'serializers'
class SnippetSerializerA(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
title = serializers.CharField(max_length=100)
code = serializers.CharField(style={'base_template': 'textarea.html'})
linenos = serializers.BooleanField(required=False)
language = serializers.ChoiceField(
choices=LANGUAGE_CHOICES, default='python')
style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
def create(self, validate_data):
'''create and return a new Snippet'''
return Snippet.objects.create(**validate_data)
def update(self, instance, validate_data):
''' Update and return an existing snippet'''
instance.title = validate_data.get('title', instance.title)
instance.code = validate_data.get('code', instance.code)
instance.linenos = validate_data.get('linenos', instance.linenos)
instance.language = validate_data.get('language', instance.language)
instance.style = validate_data.get('style', instance.style)
instance.save()
return instance
# SnippetSerializer using 'ModelSerializer'
class SnippetSerializerB(serializers.ModelSerializer):
class Meta:
model = Snippet
fields = "__all__"
# creating a serializer for Roles Model
class CarBrandsSerializer(serializers.ModelSerializer):
class Meta:
model = CarBrands
fields = '__all__'
# creating serializer for list of all the names of employee in Employees Model
class EmployeeNameSerializer(serializers.ModelSerializer):
class Meta:
model = Employees
fields = ["employee_name"]
# creating a serialzer for getting details of all the employees
class EmpDetailSerializer(serializers.ModelSerializer):
    def length(value):
        # DRF calls field validators with the value only, so no `self` parameter here.
        if len(value) != 10:
            raise serializers.ValidationError(
                "contact should contain 10 digits only")
designations = serializers.SlugRelatedField(
slug_field="designation_name", read_only=True)
employee_name = serializers.CharField(max_length=50)
contact = serializers.CharField(validators=[length])
# validation for employee_name field that it should contain mr or mrs
def validate_employee_name(self, value):
a = "mr"
if a not in value.lower():
raise serializers.ValidationError(
'this employee name should contain Mr or Mrs')
return value
class Meta:
model = Employees
fields = "__all__"
class EmployeeDesignationsSerializer(serializers.Serializer):
designation_name = serializers.CharField(
required=True, allow_blank=False, max_length=100)
def create(self, validate_data):
'''create and return a new Snippet'''
return EmployeeDesignations.objects.create(**validate_data)
def update(self, instance, validate_data):
''' Update and return an existing snippet'''
instance.designation_name = validate_data.get(
            'designation_name', instance.designation_name)
instance.save()
return instance
# ------------------------------------------------------------------------------------------------------------------------------------------------
class PersonSerializer(serializers.ModelSerializer):
opentasks = serializers.SerializerMethodField("open_task")
def open_task(self, user):
return len(user.tasks.filter(status=1))
class Meta:
model = Persons
fields = "__all__"
| 2.484375 | 2 |
fly/response.py | tatsuya4649/fly | 16 | 12792223 | <filename>fly/response.py
from ._fly_server import _fly_response
import json
from .cookie import *
from .exceptions import *
class _Response(_fly_response):
@property
def status_code(self):
raise NotImplementedError(
"_Response must have status_code attr"
)
@property
def header(self):
raise NotImplementedError(
"_Response must have header attr"
)
@property
def header_len(self):
raise NotImplementedError(
"_Response must have header attr"
)
@property
def body(self):
raise NotImplementedError(
"_Response must have body attr"
)
@property
def content_type(self):
raise NotImplementedError(
"_Response must have content_type attr"
)
class Response(_Response):
"""
All Response subclass must have 5 attributes.
- status_code: default 200
- header: default: []
- body: default: bytes()
- content_type: default: text/plain
"""
def __init__(
self,
status_code=200,
header=None,
body=None,
content_type="text/plain",
):
if not isinstance(status_code, int):
raise TypeError("status_code must be int type.")
        if header is not None and not isinstance(header, list):
            raise TypeError("header must be list type.")
if not isinstance(content_type, str):
raise TypeError("content_type must be str type.")
if body is not None and not isinstance(body, (bytes)):
raise TypeError("body must be bytes type.")
self._status_code = status_code
self._content_type = content_type
        self._header = header if header is not None else list()
self._body = body if body is not None else bytes()
@property
def status_code(self):
return self._status_code
@property
def header(self):
return self._header
@property
def header_len(self):
return len(self._header)
@property
def body(self):
return self._body
@property
def content_type(self):
return self._content_type
def add_header(self, name, value):
hdr_elem = dict()
hdr_elem["name"] = name
hdr_elem["value"] = value
self._header.append(hdr_elem)
def set_cookie(
self,
name,
value,
**kwards
):
hdr_elem = dict()
hdr_elem["name"] = "Set-Cookie"
value = header_value_from_cookie(name, value, **kwards)
hdr_elem["value"] = value
self._header.append(hdr_elem)
class PlainResponse(Response):
def __init__(
self,
status_code=200,
header=None,
body=None,
):
if body is not None and not isinstance(body, str):
raise TypeError("body must be str type.")
super().__init__(
status_code,
header,
body.encode("utf-8") if isinstance(body, str) else None,
content_type="text/plain"
)
class HTMLResponse(Response):
def __init__(
self,
status_code=200,
header=None,
body=None,
):
if body is not None and not isinstance(body, str):
raise TypeError("body must be str type.")
super().__init__(
status_code,
header,
body.encode("utf-8") if body is not None else None,
content_type="text/html"
)
class JSONResponse(Response):
def __init__(
self,
status_code=200,
header=None,
body=None,
):
if body is not None and not isinstance(body, dict) and \
not isinstance(body, list):
raise TypeError("body must be list/dict type.")
super().__init__(
status_code,
header,
json.dumps(body).encode("utf-8") \
if body is not None and len(body) > 0 \
else None,
content_type="application/json"
)
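# Usage sketch (illustration only; the keyword options given to set_cookie are
# forwarded to header_value_from_cookie, which is not shown here, so the names
# "path" and "max_age" are assumptions):
#
#   res = JSONResponse(status_code=200, body={"ok": True})
#   res.add_header("X-App", "demo")
#   res.set_cookie("session", "abc123", path="/", max_age=3600)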
| 2.921875 | 3 |
dominating_set_testing.py | rbbi/ListMatricesAndRecursionCoursework | 1 | 12792224 | <filename>dominating_set_testing.py<gh_stars>1-10
def IsDominatingSet(A,S):
    # A: adjacency matrix (list of lists), S: candidate dominating set (vertex indices).
    # Returns True if every vertex outside S is adjacent to at least one vertex in S.
NAV=[]
for i in range(len(S)):
dominator=S[i]
vertex=A[dominator]
NA=GetNonAdjacents(vertex,[],len(vertex)-1,dominator)
NAV.append(NA)
ToCheck=RemoveS(NAV,S)
return Check(A,S,ToCheck)
def Check(A,S,NAV):
for i in range(len(NAV)):
any_adj=False
for j in range(len(S)):
is_adj=IsAdjacent(A,S[j],NAV[i])
if (is_adj is True):
any_adj=True
if (any_adj is False):
return False
return True
def RemoveS(NAV,S):
NAV=Flatten(NAV)
for i in range(len(S)):
if (S[i] in NAV):
NAV.remove(S[i])
return NAV
def Flatten(L):
Flat=[]
for sub in L:
for val in sub:
Flat.append(val)
return Flat
def IsAdjacent(A,v1,v2):
if (v1==v2):
return False
return A[v1][v2] == 1
def GetNonAdjacents(V,NA,i,s):
if (i==-1):
return NA
cell=V[i]
if (i !=s and cell != 1):
NA.append(i)
i=i-1
return GetNonAdjacents(V,NA,i,s)
if __name__ == "__main__":
A=[
[0,1,1,0,0],
[1,0,1,1,0],
[1,1,0,0,1],
[0,1,0,0,1],
[0,0,1,1,0]]
S=[1,4]
print(IsDominatingSet(A,S))
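    # Extra check (sketch): vertex 0 is not adjacent to vertices 3 and 4 in the
    # matrix above, so a single-vertex set {0} should not dominate the graph.
    S2 = [0]
    print(IsDominatingSet(A, S2))  # expected: False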
| 2.953125 | 3 |
src/run_for_every.py | e2t/cli-tools | 0 | 12792225 | <reponame>e2t/cli-tools
"""
Run the specified process for each of the specified files.

The program takes the path to a process, its options and a list of files as
arguments. The process is then launched with those options plus the name of
one of the files. Launching can be sequential or parallel, i.e. either waiting
for the previous process to finish or starting them all at once.
"""
import os
import subprocess
import argparse
SEPARATOR = '--'
USAGE = '%(prog)s [-h] [--wait] -- program [options] -- file [file ...]'
def main() -> None:
arg_parser = argparse.ArgumentParser(usage=USAGE)
arg_parser.add_argument('--wait', action='store_const', const=True,
                            help='wait for each process to finish')
arg_parser.add_argument('argv', nargs='+')
args = arg_parser.parse_args()
if len(args.argv) < 3:
exit(1)
if SEPARATOR not in args.argv:
exit(1)
separator_index = args.argv.index(SEPARATOR)
if separator_index < 1:
exit(1)
cmd = args.argv[:separator_index] + ['']
files = args.argv[separator_index + 1:]
files_number = len(files)
def percents(index: int) -> str:
return '%3d%%' % (100 * (index + 1) / files_number)
for index, filename in enumerate(files):
print(percents(index), os.path.basename(filename))
cmd[-1] = filename
proc = subprocess.Popen(cmd)
if args.wait:
proc.wait()
if __name__ == '__main__':
main()
| 2.34375 | 2 |
bobocep/rules/events/composite_event.py | r3w0p/bobocep | 5 | 12792226 | <reponame>r3w0p/bobocep
from typing import Dict
from bobocep.rules.events.bobo_event import BoboEvent
from bobocep.rules.events.histories.bobo_history import BoboHistory
class CompositeEvent(BoboEvent):
"""A composite event.
:param timestamp: The event timestamp indicating when it was first
generated.
:type timestamp: int
:param name: The event name.
:type name: str
:param history: The history of events that caused the composite event to be
generated.
:type history: BoboHistory
:param data: The event data, defaults to an empty dict.
:type data: Dict[str, str], optional
:param event_id: The event ID, defaults to a randomly generated ID.
:type event_id: str, optional
"""
NAME = "name"
HISTORY = "history"
def __init__(self,
timestamp: int,
name: str,
history: BoboHistory,
data: Dict[str, str] = None,
event_id: str = None) -> None:
super().__init__(timestamp=timestamp,
data=data,
event_id=event_id)
self.name = name
self.history = history
def to_dict(self) -> dict:
"""
:return: A dict representation of the object.
"""
return {
self.TIMESTAMP: self.timestamp,
self.NAME: self.name,
self.HISTORY: self.history.to_dict(),
self.DATA: self.data,
self.EVENT_ID: self.event_id
}
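# Construction sketch (BoboHistory's constructor arguments are not shown in this
# file, so the empty-history call below is an assumption):
#
#   event = CompositeEvent(timestamp=1589000000,
#                          name="pattern_abc",
#                          history=BoboHistory({}),
#                          data={"source": "sensor_1"})
#   event.to_dict()  # nested dict, with the history serialised via its own to_dict()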
| 2.90625 | 3 |
pymortar/setup.py | gtfierro/mortar-frontend | 0 | 12792227 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
print(setuptools.find_packages())
setuptools.setup(
name="pymortar",
version="0.1.4",
author="<NAME>",
author_email="<EMAIL>",
description="Python3 Mortar",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mortar-frontend",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
],
)
| 1.601563 | 2 |
examples/Imagine/dynamical_test.py | smbct/LOLH | 2 | 12792228 | <gh_stars>1-10
#!/usr/bin/python
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import sys
sys.path.append('../../python')
from instance import Instance
import visualizer
import histogram
from network import GData
from network import Graph
# # read the atoms from the file
# filename = 'atoms_unshared.txt'
# file = open(filename, 'r')
# content = file.read().splitlines()
# file.close()
# atoms = []
# for elt in content:
# tokens = elt.split(' ')
# atoms.append((tokens[0], int(tokens[1])))
#
# # read the coexpression graph
# filename = '../../dataset/Imagine/coexpression/coexpression_network.txt'
# file = open(filename, 'r')
# content = file.read().splitlines()
# file.close()
#
# content_unshared = []
#
# for line in content:
# tokens = line.split(' ')
# atom = (tokens[0], int(tokens[1]))
# if atom in atoms:
# content_unshared.append(line)
# print(len(content_unshared))
#
# # save the sub network
# filename = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt'
# file = open(filename, 'w')
# ind = 0
# for line in content_unshared:
# file.write(line)
# if ind < len(content_unshared) - 1:
# file.write('\n')
# ind += 1
# file.close()
# create a sub coexpression graph
input_file = '../../dataset/Imagine/coexpression/coexpression_network_sub_test_dynamics.txt'
n_cell_min = 1
score_min = 0.005
louvain_param = 0.7
# output_file = '../../dataset/Imagine/coexpression/coexpression_graph_sub_test_dynamics.txt'
data = GData()
data.load_from_file(input_file, n_cell_min, score_min)
graph = Graph('sub coexpression network')
# build graph from raw data, exclude mitochondrial and ribosomal genes
exclude_mt_rp = True
filter_edges = True
graph.create_from_gdata(data, exclude_mt_rp, filter_edges)
graph.compute_clusters(louvain_param)
graph.compute_positions()
# graph.save(output_file)
# display the coexpression graph
print('display the coexpression graph')
col_option = 'clustering_colors'
fig, ax = plt.subplots()
graph.plot(ax, col_option, False)
ax.set_title('Coexpression graph')
plt.show()
| 2.6875 | 3 |
back-end/erasmail/emails/migrations/0043_auto_20210425_0635.py | SamirM-BE/ErasMail | 7 | 12792229 | <reponame>SamirM-BE/ErasMail
# Generated by Django 3.1.6 on 2021-04-25 06:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('emails', '0042_auto_20210423_0844'),
]
operations = [
migrations.RenameField(
model_name='emailheaders',
old_name='receiver',
new_name='owner',
),
]
| 1.546875 | 2 |
2868.py | ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python | 1 | 12792230 | c = int(input())
while c != 0:
c -= 1
num1, sign1, num2, sign2, ans = input().split()
correct_ans = 0
if sign1 == "+":
correct_ans = int(num1) + int(num2)
elif sign1 == "-":
correct_ans = int(num1) - int(num2)
else:
correct_ans = int(num1) * int(num2)
answer = 'r' * abs(correct_ans - int(ans))
print(f"E{answer}ou!") | 3.6875 | 4 |
ogb/graphproppred/ppa/transformer.py | rdangovs/6883-project | 0 | 12792231 | <reponame>rdangovs/6883-project
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.linalg import eig
class TopologicalEncoding():
def __init__(self, encoding_size=50):
self.encoding_size = encoding_size
def encode(self, graph):
A = graph.adjacency_matrix().to_dense()
D = torch.diag(graph.in_degrees())
L = (D - A).numpy()
w, V = eig(L)
momenta = np.dot(L, V)
clipping = self.encoding_size - graph.num_nodes()
if clipping > 0:
momenta_ = []
for momentum in momenta:
momenta_.append(np.pad(momentum, (0, clipping)))
momenta = np.array(momenta_)
elif clipping < 0:
momenta = momenta[:, :self.encoding_size]
return torch.FloatTensor(np.real(momenta))
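# Encoding sketch (illustration only): the Laplacian "momenta" computed above can
# be inspected on a small DGL graph; the result has shape (num_nodes, encoding_size).
#
#   g = dgl.to_bidirected(dgl.graph(([0, 1, 2], [1, 2, 0])))  # 3-node cycle
#   enc = TopologicalEncoding(encoding_size=8).encode(g)
#   enc.shape  # torch.Size([3, 8])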
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = F.relu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
src2, scores = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, scores
class Graphormer(nn.Module):
def __init__(self, d_model=128, depth=5, nhead=8, expansion_factor=2,
alphabet_size=100, encoding_size=32, device='cpu', concatenate_encoding=True):
super().__init__()
self.d_model = d_model
self.depth = depth
self.nhead = nhead
self.dim_feedforward = d_model * expansion_factor
self.concatenate_encoding = concatenate_encoding
if concatenate_encoding:
self.encoder = TopologicalEncoding(encoding_size)
self.node_embedder = nn.Embedding(alphabet_size, d_model - encoding_size)
else:
self.node_embedder = nn.Embedding(alphabet_size, d_model)
self.encoder = TopologicalEncoding(d_model)
self.blocks = nn.ModuleList([TransformerEncoderLayer(d_model=d_model, nhead=nhead,
dim_feedforward=self.dim_feedforward)
for _ in range(depth)])
def forward(self, graphs, need_weights=False):
encoding = torch.cat([self.encoder.encode(graph) for graph in dgl.unbatch(graphs)], dim=0)
embedding = self.node_embedder(graphs.ndata['atomic'].type(torch.long))
if self.concatenate_encoding:
graphs.ndata['h'] = torch.cat((encoding, embedding.squeeze()), dim=-1)
else:
graphs.ndata['h'] = encoding + embedding.squeeze()
batch = []
for g in dgl.unbatch(graphs):
batch.append(g.ndata['h'])
h = torch.nn.utils.rnn.pad_sequence(batch)
attentions_ = []
for block in self.blocks:
h, att_ = block(h)
if need_weights: attentions_.append(att_)
truncated = [h[:num_nodes, i, :] for i, num_nodes in enumerate(graphs.batch_num_nodes())]
h = torch.cat(truncated, dim=0)
if need_weights:
return h, attentions_
return h | 2.171875 | 2 |
detection.py | harrisonedwards/OET_Controller | 0 | 12792232 | import cv2
import numpy as np
def detect(img):
# finds and fills the located robots
    img = cv2.convertScaleAbs(img, alpha=1, beta=1.5)
structure = np.ones((3, 3))
canny = np.copy(cv2.Canny(img, 20, 120))
dilated = cv2.dilate(canny, structure)
contours, hier = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
filled = cv2.drawContours(np.zeros(img.shape, dtype=np.uint8), contours, -1, 1, -1, 0, hier, 1)
return np.copy(filled)
def get_large_contours(detect):
# take a detection mask, and contour information add circles
contours, hier = cv2.findContours(detect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
large_contours = []
contour_area_minimum = 2000
for c in contours:
if cv2.contourArea(c) > contour_area_minimum:
large_contours.append(c)
return large_contours
def get_robot_angle(contour, center):
contour = np.squeeze(np.copy(contour))
contour -= center
theta = np.arctan2(contour[:, 1], contour[:, 0])
# rho = np.sqrt(contour[:, 0] ** 2 + contour[:, 1] ** 2)
val, bin_edges = np.histogram(theta, bins=50, range=[-np.pi, np.pi])
bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2
return np.nanmean(np.where(val == 0, bin_centers, np.nan))
def get_robots(large_contours, detect, objective):
# get memory
robot_control_mask = np.zeros(detect.shape)
large_contour_image = cv2.drawContours(np.copy(robot_control_mask), large_contours, -1, 1, -1)
# probably needs more adjustment in the future, so will make a dict for now
objective_calibration_dict = {'2x': 4,
'4x': 2,
'10x': 1,
'20x': 1,
'40x': 1}
robot_angles = []
contours_towards_center = []
contour_range_border_limit = 100 * objective_calibration_dict[objective]
contours_in_limits = []
for contour in large_contours:
xs = np.squeeze(contour)[:, 0]
ys = np.squeeze(contour)[:, 1]
# check that our contours are within acceptable limits, draw their circle if they are
if np.all(xs > contour_range_border_limit) and np.all(
xs < large_contour_image.shape[0] - contour_range_border_limit):
if np.all(ys > contour_range_border_limit) and np.all(
ys < large_contour_image.shape[0] - contour_range_border_limit):
contours_in_limits.append(contour)
M = cv2.moments(contour)
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
contours_towards_center.append(contour)
angle = get_robot_angle(contour, (cx, cy))
robot_angles.append(angle)
return contours_towards_center, robot_angles
def get_robot_control(img, objective):
detected = detect(img)
large_contours = get_large_contours(detected)
robots, robot_angles = get_robots(large_contours,
detected,
objective)
return robots, robot_angles
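# Usage sketch (assumes a square grayscale frame from the microscope camera; the
# objective string must be a key of objective_calibration_dict):
#
#   frame = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)
#   robots, angles = get_robot_control(frame, "10x")
#   print(len(robots), angles)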
| 2.90625 | 3 |
compphys/__init__.py | JWKennington/CompPhys | 0 | 12792233 | """Package-level information"""
__MAJOR__ = 0
__MINOR__ = 0
__MICRO__ = 1
__VERSION__ = (__MAJOR__, __MINOR__, __MICRO__)
__version__ = '.'.join(str(n) for n in __VERSION__)
__github_url__ = 'https://github.com/JWKennington/CompPhys'
from compphys.tests import run_tests # top level function for running test suite, analogous to scipy.test()
| 1.53125 | 2 |
redlight/err.py | sburns/red-light | 1 | 12792234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" err.py
Redlight errors
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright 2012 Vanderbilt University. All Rights Reserved'
class RedlightError(Exception):
pass
| 1.671875 | 2 |
install.py | InfinityMarketing/Harbor-Script | 0 | 12792235 | <reponame>InfinityMarketing/Harbor-Script
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 3 14:20:50 2017
@author: <NAME>
"""
import zipfile
import os
import requests
import glob
import subprocess
import platform
import sys, getopt
import argparse
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--update',
action='store_true',
dest='update',
default=False,
help="Check for updates to the harbor script")
parser.add_argument('-i', '--install', action='store_true',
dest='install',
default=False,
help="Install harbor theme in current directory")
parser.add_argument('-d', '--directory', action='store',
dest='dir',
help="Specify a directory to install Harbor theme to or if -u option is present updates the Harbor based theme in that directory")
plat = platform.system()
results = parser.parse_args()
install = results.install
update = results.update
setup_dir = results.dir
if(install):
if setup_dir is not None:
os.chdir(setup_dir)
if platform.system() != "Windows":
if os.getuid() != 0:
print("Please run this script as root")
print("Example: 'sudo python3 setup.py'")
return
#download theme zip
if fetchArchive() == False:
return 1
print("Setting up Theme...")
slug = setupTheme()
setupEnvironment(slug)
elif update:
if setup_dir is not None:
updateTheme(setup_dir)
else:
print("Checking for updates to Harbor script...")
print("Up to date!")
else:
parser.print_usage()
def updateTheme(directory):
os.chdir(directory)
print("Updating theme...")
os.system("bower list > updates.tmp")
update_avail = re.compile("\(([0-9]\.)*[0-9] available\)")
nameRe = re.compile("[a-z]+-*[a-z]*#")
#print(update_avail.findall("├─┬ breakpoint-sass#2.5.0 "))
#exit(0)
with open("updates.tmp", "r") as update_file:
for line in update_file:
results = update_avail.findall(line)
if results != []:
print(line)
nameMatch = nameRe.search(line)
name = nameMatch.group()[:-1]
ans = input("Update module?(Y/n)")
while ans != "" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n':
ans = input("Update module?(Y/n)")
if(ans == "" or ans.lower()[0] == 'y'):
print("updating", name, sep=" ")
os.system("bower update " + name)
print("")
print("Done!")
# Downloads the starter theme _s from github
def fetchArchive():
try:
os.remove("sass-restructure.zip")
except FileNotFoundError:
pass
print("Downloading Theme files...", end=' ')
file = requests.get("https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip")
if file.status_code != 200:
print("Error: There was a problem while downloading the files.\n\tAborting. ")
return False
with open("sass-restructure.zip", "wb") as content:
content.write(file.content)
print("Done!")
print("Extracting files...", end=' ')
with zipfile.ZipFile("sass-restructure.zip", "r") as file:
file.extractall(".")
print("Done!")
return True
def setupTheme():
name = input("Enter a name for the theme: ")
slug = name.lower().replace(' ', '-')
funcSlug = name.lower().replace(' ', '_')
desc = input("Enter a short description for the theme: ")
print("Setting up Theme...", end=' ')
os.rename("./Harbor-sass-restructure", "./" + slug)
files = glob.glob("./" + slug + "/*.php")
for filename in glob.glob("./" + slug + "/*/*.php"):
files.append(filename)
strings = []
strings.append(("'harbor'", "'" + slug + "'"))
strings.append(("harbor_", funcSlug + "_"))
strings.append((" <code> Harbor</code>", " <code> " + name.replace(' ', '_') + "</code>"))
strings.append(("Harbor-", slug + "-"))
findInFiles(strings, files)
headerInfo = []
headerInfo.append(("Text Domain: harbor", "Text Domain: " + slug))
headerInfo.append(("Theme Name: Harbor", "Theme Name: " + name))
headerInfo.append(("Description: Harbor is a starter theme and development environment setup by Infinity Marketing that is heavily based on Automattic's Underscores theme.", "Description: " + desc))
findInFiles(headerInfo, ["./" + slug + "/style.css", "./" + slug + "/sass/style.scss"])
print('Done!')
return slug
def findInFiles(strings, files):
for filename in files:
file = open(filename, "r")
filedata = file.read()
file.close()
for change in strings:
filedata = filedata.replace(change[0], change[1])
file = open(filename, "w")
file.write(filedata)
file.close()
def setupEnvironment(slug):
cmd = "where" if platform.system() == "Windows" else "which"
npm = subprocess.run(cmd+ " npm", shell=True)
if npm.returncode == 1:
print("NodeJs is not installed. Aborting")
return
bower = subprocess.run(cmd+ " bower", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if bower.returncode == 1:
print("Bower is not installed.")
print("Installing bower...")
subprocess.run("npm install -g bower", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Done!")
gulp = subprocess.run(cmd+ " gulp", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if gulp.returncode == 1:
print("Gulp is not installed")
print("Installing Gulp...", end=' ')
subprocess.run("npm install -g gulp", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Done!")
print("Installing dependancies...")
subprocess.run("bower install", shell=True, cwd="./"+slug)
subprocess.run("npm install", shell=True, cwd="./"+slug)
print("Done!")
if(__name__ == "__main__"):
main()
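# Example invocations (sketch; the paths are placeholders):
#   sudo python3 install.py --install --directory /var/www/html/wp-content/themes
#   python3 install.py --update --directory /var/www/html/wp-content/themes/my-theme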
| 1.6875 | 2 |
green/__init__.py | dtan3847/green | 0 | 12792236 | <filename>green/__init__.py
from __future__ import unicode_literals
from .cmdline import main
from .version import __version__
main
__version__
| 1.15625 | 1 |
PcmPy/vis.py | mehrdadkashefi/PcmPy | 0 | 12792237 | <filename>PcmPy/vis.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions for visualization of PCM models, Data, and model fits
@author: jdiedrichsen
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as mlines
import seaborn as sb
import pandas as pd
def model_plot(likelihood,null_model=0,noise_ceiling=None,upper_ceiling=None):
"""
Make model comparisiom plot
Parameters:
likelihood (pd.DataFrame)
Data Frame with the results (from T.likelihood)
null_model (int or string)
Number or name of the model that define the zero-point
noise_ceiling(int or string)
Number or name of the model that defines the noise ceiling
upper_ceiling (np.array or series)
Likelihood for the upper noise ceiling (usuallu from group fit)
Returns:
ax (matplotlib.Axis.axis)
Matplotlib axis object
"""
noise_ceil_col = [0.5, 0.5, 0.5, 0.2]
m_names = likelihood.columns.values
if type(null_model) != str:
null_model = m_names[null_model]
if noise_ceiling is not None:
if type(noise_ceiling) != str:
noise_ceiling = m_names[noise_ceiling]
# Subtract the baseline
baseline = likelihood.loc[:,null_model].values
likelihood = likelihood - baseline.reshape(-1,1)
# Stretch out the data frame
LL=pd.melt(likelihood)
indx = np.logical_and(LL.model !=null_model, LL.model !=noise_ceiling)
ax = sb.barplot(x=LL.model[indx], y=LL.value[indx])
xlim = ax.get_xlim()
if noise_ceiling is not None:
noise_lower = np.nanmean(likelihood[noise_ceiling])
if upper_ceiling is not None:
noise_upper = np.nanmean(upper_ceiling-baseline)
noiserect = patches.Rectangle((xlim[0], noise_lower), xlim[1]-xlim[0], noise_upper-noise_lower, linewidth=0, facecolor=noise_ceil_col, zorder=1e6)
ax.add_patch(noiserect)
else:
l = mlines.Line2D([xlim[0], xlim[1]], [noise_lower, noise_lower],color=[0,0,0], linestyle=':')
ax.add_line(l)
ax.set_ylabel('Log Bayes Factor')
return ax | 2.765625 | 3 |
src/lr/models/transformers/processor.py | felipessalvatore/logical-robustness | 0 | 12792238 | <filename>src/lr/models/transformers/processor.py
import re
import logging
import torch
import os
import pandas as pd
import numpy as np
from multiprocessing import Pool
import random
from transformers.data.processors.utils import InputExample, InputFeatures
from transformers.data.processors.utils import DataProcessor
from torch.utils.data import TensorDataset
logging.basicConfig(filename='example.log', level=logging.INFO)
spaces = re.compile(' +')
def merge_lists(lists):
base = []
for l in lists:
base.extend(l)
return base
def parallelize_df2df(df, func, n_cores):
"""
general fucntion to parallelize a function applied to
a df
"""
df_split = np.array_split(df, n_cores)
pool = Pool(n_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
return df
def parallelize_df2list(df, func, n_cores):
"""
general fucntion to parallelize a function applied to
a df
"""
df_split = np.array_split(df, n_cores)
pool = Pool(n_cores)
result = merge_lists(pool.map(func, df_split))
pool.close()
pool.join()
return result
def remove_first_space(x):
"""
remove_first_space from word x
:param x: word
:type x: str
:return: word withou space in front
:rtype: str
"""
try:
if x[0] == " ":
return x[1:]
else:
return x
except IndexError:
return x
def simple_pre_process_text(data, text_column):
"""
preprocess all input text from dataframe by
lowering, removing non words, removing
space in the first position and
removing double spaces
:param data: data frame with the colum 'text'
:type data: pd.DataFrame
:param text_column: colum text_column
:type text_column: str
"""
s = data.loc[:, text_column].copy()
s = s.apply(lambda x: x.lower())
    s = s.apply((lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x)))  # noqa keep only alphanumerics and whitespace
s = s.apply(remove_first_space) # noqa remove space in the first position
s = s.apply((lambda x: spaces.sub(" ", x))) # noqa remove double spaces
return s
def pre_process_nli_df(data):
"""
Apply preprocess on the input text from a NLI dataframe
:param data: data frame with the colum 'text'
:type data: pd.DataFrame
"""
new_p = simple_pre_process_text(data, text_column="premise")
new_h = simple_pre_process_text(data, text_column="hypothesis")
label = data.label
o_index = data.o_index
dict_ = {"premise": new_p, "hypothesis": new_h,
"label": label, "o_index": o_index}
return pd.DataFrame(dict_)
def filter_df_by_label(df, drop_label='-'):
"""
drop observations with label 'drop_label'
"""
return df.loc[df.label != drop_label]
def clean_df(df, n_cores):
"""
return clean version of the dataframe
with the original index
"""
df_new = df.copy()
df_new.loc[:, "o_index"] = df.index
df_new = filter_df_by_label(df_new.dropna()).reset_index(drop=True)
df_new = parallelize_df2df(df=df_new, func=pre_process_nli_df, n_cores=n_cores)
return df_new
class NLIProcessor(DataProcessor):
"""Processor for the any nli dataf frame in csv
(columns = premise | hypothesis | label)"""
def __init__(self, hyperparams):
super().__init__()
self.tokenizer = hyperparams["tokenizer"]
self.max_length = hyperparams["max_seq_length"]
self.pad_on_left = hyperparams["pad_on_left"]
self.pad_token = hyperparams["pad_token"]
self.pad_token_segment_id = hyperparams["pad_token_segment_id"]
self.mask_padding_with_zero = hyperparams["mask_padding_with_zero"]
self.base_path = hyperparams["base_path"]
def df2examples(self, df, set_type):
df = filter_df_by_label(df.dropna()).reset_index(drop=True)
df = pre_process_nli_df(df)
examples = self._create_examples(df, set_type)
return examples
def get_train_examples(self, df):
return self.df2examples(df, "train")
def get_dev_examples(self, df):
return self.df2examples(df, "dev")
def df2examples_parallel_train(self, df, n_cores):
df_new = df.copy()
if "o_index" not in df_new.columns:
df_new.loc[:, "o_index"] = df.index
result = parallelize_df2list(df_new, self.get_train_examples, n_cores)
del df_new
return result
def df2examples_parallel_dev(self, df, n_cores):
df_new = df.copy()
df_new.loc[:, "o_index"] = df.index
result = parallelize_df2list(df_new, self.get_dev_examples, n_cores)
del df_new
return result
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
def get_label_map(self):
label_list = self.get_labels()
label_map = {label: i for i, label in enumerate(label_list)}
return label_map
def _create_examples(self, df, set_type):
"""Creates examples for the training and dev sets."""
examples = []
n = df.shape[0]
for i in range(n):
example = df.loc[i]
name = example.o_index
guid = "{}-{}".format(set_type, name)
input_example = InputExample(guid=guid,
text_a=example.premise,
text_b=example.hypothesis,
label=example.label)
examples.append(input_example)
return examples
def _convert_examples_to_features(self, examples):
max_length = self.max_length
pad_token = self.pad_token
pad_token_segment_id = self.pad_token_segment_id
mask_padding_with_zero = self.mask_padding_with_zero
label_map = self.get_label_map()
features = []
for (ex_index, example) in enumerate(examples):
len_examples = len(examples)
inputs = self.tokenizer.encode_plus(example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [
1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if self.pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1]
* padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] *
padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + \
([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + \
([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length)
label = label_map[example.label]
features.append(InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features
def examples2features_parallel(self, examples, n_cores):
result = parallelize_df2list(examples,
self._convert_examples_to_features,
n_cores)
return result
def df2features(self, df, n_cores, mode):
path = self.base_path + "{}_{}".format(mode, self.max_length)
logging.info("Saving features in file: %s", path)
if mode.find("train") > -1:
examples = self.df2examples_parallel_train(df, n_cores)
else:
examples = self.df2examples_parallel_dev(df, n_cores)
features = self.examples2features_parallel(examples, n_cores)
torch.save(features, path)
return path
def features2dataset(cached_features_file):
assert os.path.exists(cached_features_file)
logging.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids,
all_attention_mask,
all_token_type_ids,
all_labels)
return dataset
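if __name__ == "__main__":
    # Hedged usage sketch appended for illustration; it is not part of the original
    # module. It assumes the pandas/torch imports at the top of this file plus the
    # `transformers` package, and a CSV with "premise", "hypothesis" and "label"
    # columns. The file name, tokenizer choice and hyperparameter values below are
    # placeholders, not values taken from this project.
    from transformers import BertTokenizer

    raw_df = pd.read_csv("snli_train.csv")  # hypothetical input file
    cleaned = clean_df(raw_df, n_cores=4)   # drop NaNs / '-' labels, normalise text

    hyperparams = {
        "tokenizer": BertTokenizer.from_pretrained("bert-base-uncased"),
        "max_seq_length": 128,
        "pad_on_left": False,
        "pad_token": 0,
        "pad_token_segment_id": 0,
        "mask_padding_with_zero": True,
        "base_path": "cached_",
    }
    processor = NLIProcessor(hyperparams)

    # Tokenise in parallel, cache the features to disk, then reload them as a
    # TensorDataset ready to be wrapped in a torch DataLoader.
    cache_path = processor.df2features(cleaned, n_cores=4, mode="train_snli")
    dataset = features2dataset(cache_path)
    print(len(dataset))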
| 2.609375 | 3 |
k8s_snapshots/logconf.py | gmarkey/k8s-snapshots | 326 | 12792239 | <filename>k8s_snapshots/logconf.py<gh_stars>100-1000
import logging
import logging.config
from collections import OrderedDict
from typing import Optional, List, Any, Dict
import structlog
import sys
from k8s_snapshots import serialize
class ProcessStructuredErrors:
def __init__(self):
pass
def __call__(self, logger, method_name, event_dict):
exc_info = event_dict.pop('exc_info', None)
if exc_info is None:
return event_dict
exc_type, exc, exc_tb = structlog.processors._figure_out_exc_info(
exc_info)
__structlog__ = getattr(exc, '__structlog__', None)
if not callable(__structlog__):
event_dict['exc_info'] = exc_info
return event_dict
structured_error = __structlog__()
event_dict['structured_error'] = structured_error
return event_dict
def add_message(logger, method_name, event_dict):
"""
Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys.
``key_hint`` : ``Optional[str]``
a '.'-separated path of dictionary keys.
``hint`` : ``Optional[str]``
will be formatted using ``.format(**event_dict)``.
"""
def from_hint(ed):
hint = event_dict.pop('hint', None)
if hint is None:
return
try:
return hint.format(**event_dict)
except Exception as exc:
return f'! error formatting message: {exc!r}'
def path_value(dict_: Dict[str, Any], key_path: str) -> Optional[Any]:
value = dict_
for key in key_path.split('.'):
if value is None:
return
__structlog__ = getattr(value, '__structlog__', None)
if __structlog__ is not None:
value = __structlog__()
value = value.get(key)
return value
def from_key_hint(ed) -> Optional[str]:
key_hint = ed.pop('key_hint', None)
if key_hint is None:
return
value = path_value(ed, key_hint)
return format_kv(key_hint, value)
def from_key_hints(ed) -> List[str]:
key_hints = ed.pop('key_hints', None)
if key_hints is None:
return []
return [
format_kv(key_hint, path_value(ed, key_hint))
for key_hint in key_hints
]
def format_kv(key: str, value: Any) -> str:
return f'{key}={serialize.process(value)}'
hints = [
from_hint(event_dict),
from_key_hint(event_dict)
]
hints += from_key_hints(event_dict)
if all(hint is None for hint in hints):
if event_dict.get('message') is None:
event_dict['message'] = event_dict.get('event')
return event_dict
prefix = event_dict['event']
hint = ', '.join(hint for hint in hints if hint is not None)
message = event_dict.get('message')
if message is not None:
message = f'{prefix}: {message}, {hint}'
else:
message = f'{prefix}: {hint}'
event_dict['message'] = message
return event_dict
def configure_from_config(config):
configure_logging(
level_name=config['log_level'],
for_humans=not config['json_log'],
json_indent=config['structlog_json_indent'] or None,
)
def configure_logging(
level_name: str='INFO',
for_humans: bool=False,
json_indent: Optional[int]=None,
):
configure_structlog(
for_humans=for_humans,
json_indent=json_indent,
level_name=level_name,
)
def configure_structlog(
for_humans: bool=False,
json_indent: Optional[int]=None,
level_name: str='INFO'
):
key_order = ['message', 'event', 'level']
timestamper = structlog.processors.TimeStamper(fmt='ISO')
processors = [
event_enum_to_str,
ProcessStructuredErrors(),
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
rename_level_to_severity,
timestamper,
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
add_func_name,
add_message,
order_keys(key_order),
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
if for_humans:
renderer = structlog.dev.ConsoleRenderer() # <===
else:
# Make it so that 0 ⇒ None
indent = json_indent or None
renderer = structlog.processors.JSONRenderer(
indent=indent,
serializer=serialize.dumps
)
foreign_pre_chain = [
# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
foreign_event_to_message,
rename_level_to_severity,
timestamper,
]
if level_name == 'DEBUG':
root_logger_level = 'DEBUG'
else:
root_logger_level = 'ERROR'
logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'structlog': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': renderer,
'foreign_pre_chain': foreign_pre_chain,
},
},
'handlers': {
'default': {
'level': level_name,
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'structlog',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': root_logger_level,
'propagate': True,
},
'k8s_snapshots': {
'level': 'DEBUG',
}
}
}
logging.config.dictConfig(logging_config)
structlog.configure(
processors=processors,
context_class=OrderedDict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
def foreign_event_to_message(logger, method_name, event_dict):
event = event_dict.get('event')
if event is not None and 'message' not in event_dict:
event_dict['message'] = event
event_dict['event'] = 'foreign'
return event_dict
def rename_level_to_severity(logger, method_name, event_dict):
level = event_dict.pop('level', None)
event_dict['severity'] = level.upper()
return event_dict
def add_func_name(logger, method_name, event_dict):
record = event_dict.get('_record')
if record is None:
return event_dict
event_dict['function'] = record.funcName
return event_dict
def order_keys(order):
"""
Order keys for JSON readability when not using json_log=True
"""
def processor(logger, method_name, event_dict):
if not isinstance(event_dict, OrderedDict):
return event_dict
for key in reversed(order):
if key in event_dict:
event_dict.move_to_end(key, last=False)
return event_dict
return processor
def event_enum_to_str(logger, method_name, event_dict):
from k8s_snapshots import events
event = event_dict.get('event')
if event is None:
return event_dict
if isinstance(event, events.EventEnum):
event_dict['event'] = event.value
return event_dict
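if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: it wires the helpers
    # above together. The logger name, event name and key_hint value are
    # illustrative placeholders only.
    configure_logging(level_name='DEBUG', for_humans=True)
    log = structlog.get_logger('k8s_snapshots.demo')
    # `key_hint` lets add_message() append "rule.name=daily" to the rendered message.
    log.info('snapshot.created', rule={'name': 'daily'}, key_hint='rule.name')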
| 2.171875 | 2 |
utserverquery/unrealmaster.py | cwilkc/utserverquery | 1 | 12792240 | import socket
import re
import concurrent.futures
from pprint import pformat
from .unrealserver import UnrealServer
# Setup Logger
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class UnrealMasterServer(object):
def __init__(
self,
hostname,
port,
**kwargs,
):
"""
UnrealMasterServer class init statement
Args:
hostname (str): Resolvable DNS name or IP address for the
Master Server you'd wish to poll.
port (int): The port number the master server is
listening on
"""
self.hostname = hostname
self.port = port
self.servers = []
if 'logger' not in kwargs:
self.logger = logger
else:
self.logger = kwargs['logger']
if 'timeout' not in kwargs:
self.timeout = 5
else:
self.timeout = kwargs['timeout']
self.logger.debug(f'Passed kwargs: {kwargs}')
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.timeout)
self.server = (self.hostname, self.port)
def get_servers(self):
"""
        Poll the Master Server for a client list and set the class
        attribute 'servers' to a list of UnrealServer objects.
Returns: None
"""
        # The UT master server speaks a GameSpy-style protocol: queries are
        # backslash-delimited key/value strings sent over TCP; '\final\' marks the end.
command = b"\\list\\gamename\\ut\\final\\"
self.logger.debug(
f'Sending command \'\\{command}\\\' to {self.hostname}:{self.port}'
)
self.sock.connect(self.server)
self.sock.sendto(command, self.server)
fullmsg = ''
try:
while True:
msg = self.sock.recv(4096)
if len(msg) <= 0:
break
fullmsg += msg.decode('utf-8')
except socket.timeout as e:
raise e
self.logger.debug(f'Raw data received:\n\n{fullmsg}')
data = fullmsg.split('\\')[5:]
for item in data[1::2][:-1]:
self.servers.append(
UnrealServer(item.split(':')[0], int(item.split(':')[1]), logger=self.logger)
)
self.logger.info(
f'Found {len(self.servers)} servers running.'
)
def search_servers(self, query):
"""
        Search for a given query in any of the values of each server's
        info dictionary.
        Args:
            query (str): the search query to look for in the server info values
Returns: A list of Servers
"""
if not self.servers:
return
return_list = []
self.logger.info(
f'Searching {len(self.servers)} servers for keyword \'{query}\'.'
)
for server in self.servers:
self.logger.info(f"Scanning {server} for keyword.")
self.logger.debug(f"{pformat(server.info)}")
info_results = [
key for key, val in server.info.items()
if re.search(
query,
str(val),
re.IGNORECASE
)
]
if info_results:
return_list.append(server)
return return_list
def poll_now(self):
"""
Concurrently poll all servers captured from the Master Server and
capture info and status headers.
Returns: None
"""
def get_server_info(server):
server.poll_server()
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(get_server_info, self.servers) | 2.6875 | 3 |
tests/test_neo4j.py | NCATS-Gamma/robokop-messenger | 0 | 12792241 | """Test Neo4j."""
# pylint: disable=redefined-outer-name,no-name-in-module,unused-import
# ^^^ this stuff happens because of the incredible way we do pytest fixtures
import json
import os
import pytest
from .setup.neo4j_ import get_edge_properties, get_node_properties
NEO4J_URL = os.environ.get('NEO4J_URL', 'http://localhost:7474')
NEO4J_USER = os.environ.get('NEO4J_USER', 'neo4j')
NEO4J_PASSWORD = os.environ.get('NEO4J_PASSWORD', '<PASSWORD>')
def test_yank_edges():
"""Test yanking edges from the KG."""
options = {
"url": NEO4J_URL,
"credentials": {
"username": NEO4J_USER,
"password": <PASSWORD>,
},
}
edge_id = '18557484'
edges = get_edge_properties([edge_id], **options)
assert len(edges) == 1
assert edges[0]['id'] == edge_id
def test_fail_yank():
"""Test yanking nodes/edges from the KG."""
options = {
"url": NEO4J_URL,
"credentials": {
"username": NEO4J_USER,
"password": <PASSWORD>,
},
}
edge_ids = [
'18557484',
'nope',
]
with pytest.raises(RuntimeError):
get_edge_properties(edge_ids, **options)
node_ids = [
'MONDO:0005737',
'nope',
]
with pytest.raises(RuntimeError):
get_node_properties(node_ids, **options)
| 2.234375 | 2 |
src/pyTemplateBath/hello.py | StephenRicher/pyTemplate | 0 | 12792242 | <gh_stars>0
#!/usr/bin/env python3
""" Print Hello World."""
import logging
def hello_world(name='World', **kwargs):
print(f'Hello {name}.')
logging.debug(f"Printed 'Hello {name}.'")
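if __name__ == '__main__':
    # Hedged usage sketch, not part of the original file: enable DEBUG logging so
    # the logging.debug() call above becomes visible, then print two greetings.
    logging.basicConfig(level=logging.DEBUG)
    hello_world()
    hello_world(name='Python')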
| 2.671875 | 3 |
prml/kernel/support_vector_classifier.py | dinhtuyen/PRML01 | 2 | 12792243 | <gh_stars>1-10
import numpy as np
class SupportVectorClassifier(object):
def __init__(self, kernel, C=np.Inf):
"""
construct support vector classifier
Parameters
----------
kernel : Kernel
kernel function to compute inner products
C : float
penalty of misclassification
"""
self.kernel = kernel
self.C = C
def fit(self, X, t, learning_rate=0.1, decay_step=10000, decay_rate=0.9, min_lr=1e-5):
"""
estimate decision boundary and its support vectors
Parameters
----------
X : (sample_size, n_features) ndarray
input data
t : (sample_size,) ndarray
corresponding labels 1 or -1
learning_rate : float
update ratio of the lagrange multiplier
decay_step : int
number of iterations till decay
decay_rate : float
rate of learning rate decay
min_lr : float
minimum value of learning rate
Attributes
----------
a : (sample_size,) ndarray
lagrange multiplier
b : float
bias parameter
support_vector : (n_vector, n_features) ndarray
support vectors of the boundary
"""
if X.ndim == 1:
X = X[:, None]
assert X.ndim == 2
assert t.ndim == 1
lr = learning_rate
t2 = np.sum(np.square(t))
if self.C == np.Inf:
a = np.ones(len(t))
else:
a = np.zeros(len(t)) + self.C / 10
Gram = self.kernel(X, X)
H = t * t[:, None] * Gram
while True:
for i in range(decay_step):
grad = 1 - H @ a
a += lr * grad
a -= (a @ t) * t / t2
np.clip(a, 0, self.C, out=a)
mask = a > 0
self.X = X[mask]
self.t = t[mask]
self.a = a[mask]
self.b = np.mean(
self.t - np.sum(
self.a * self.t
* self.kernel(self.X, self.X),
axis=-1))
if self.C == np.Inf:
if np.allclose(self.distance(self.X) * self.t, 1, rtol=0.01, atol=0.01):
break
else:
if np.all(np.greater_equal(1.01, self.distance(self.X) * self.t)):
break
if lr < min_lr:
break
lr *= decay_rate
def lagrangian_function(self):
return (
np.sum(self.a)
- self.a
@ (self.t * self.t[:, None] * self.kernel(self.X, self.X))
@ self.a)
def predict(self, x):
"""
predict labels of the input
Parameters
----------
x : (sample_size, n_features) ndarray
input
Returns
-------
label : (sample_size,) ndarray
predicted labels
"""
y = self.distance(x)
label = np.sign(y)
return label
def distance(self, x):
"""
calculate distance from the decision boundary
Parameters
----------
x : (sample_size, n_features) ndarray
input
Returns
-------
distance : (sample_size,) ndarray
distance from the boundary
"""
distance = np.sum(
self.a * self.t
* self.kernel(x, self.X),
axis=-1) + self.b
return distance
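if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. Rather than assuming a
    # particular kernel class from this package, it defines a small RBF kernel inline;
    # any callable mapping (X, Y) to a Gram matrix should work the same way. The toy
    # data, gamma and C values are arbitrary illustrative choices.
    def rbf_kernel(x, y, gamma=1.0):
        # pairwise squared distances between rows of x and rows of y
        d2 = np.sum(x ** 2, axis=1)[:, None] + np.sum(y ** 2, axis=1) - 2 * x @ y.T
        return np.exp(-gamma * d2)

    rng = np.random.RandomState(0)
    # two well-separated blobs labelled -1 / +1, as fit() expects
    X = np.concatenate([rng.normal(-2., 1., (20, 2)), rng.normal(2., 1., (20, 2))])
    t = np.concatenate([-np.ones(20), np.ones(20)])

    model = SupportVectorClassifier(kernel=rbf_kernel, C=10.)
    model.fit(X, t, decay_step=1000)
    print("support vectors:", len(model.a))
    print("training accuracy:", np.mean(model.predict(X) == t))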
| 2.953125 | 3 |
anti-spoof/augmentor.py | hudmgy/insightface | 1 | 12792244 | <filename>anti-spoof/augmentor.py<gh_stars>1-10
import Augmentor
def RandomAugment(folder, IP=False, Graph=False, Erase=False):
if IP==False and Graph==False and Erase==False:
return None
p = Augmentor.Pipeline(folder)
if IP:
p.random_color(0.5, min_factor=0.4, max_factor=1.6)
p.random_brightness(0.5, min_factor=0.4, max_factor=1.6)
p.random_contrast(0.5, min_factor=0.4, max_factor=1.2)
if Graph:
#p.rotate(probability=0.7, max_left_rotation=20, max_right_rotation=20)
#p.zoom(probability=0.5, min_factor=0.8, max_factor=1.2)
p.random_distortion(probability=0.5, grid_width=4, grid_height=4, magnitude=4)
#p.skew_left_right(probability=0.5, magnitude=0.15)
if Erase:
p.random_erasing(1.0,rectangle_area=0.5)
return p
if __name__=='__main__':
folder = '29xxx'
p = RandomAugment(folder, Graph=True)
p.sample(1000)
| 2.5625 | 3 |
registrations/migrations/0015_auto_20181015_1002.py | praekeltfoundation/ndoh-hub | 0 | 12792245 | <filename>registrations/migrations/0015_auto_20181015_1002.py
# Generated by Django 2.1.2 on 2018-10-15 10:02
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("registrations", "0014_auto_20180503_1418")]
operations = [
migrations.AlterField(
model_name="registration",
name="created_by",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="registrations_created",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="registration",
name="updated_by",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="registrations_updated",
to=settings.AUTH_USER_MODEL,
),
),
]
| 1.476563 | 1 |
app/routes.py | BrevinS/brebapp | 0 | 12792246 | from flask import render_template, flash, redirect, url_for, request
from app import app, db
from flask_sqlalchemy import sqlalchemy
from app.forms import RegisterForm, LoginForm
from app.models import User
from flask_login import current_user, login_user, logout_user, login_required
@app.before_first_request
def initDB(*args, **kwargs):
db.create_all()
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
return render_template('base.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
acc = User(username=form.username.data, email=form.email.data, firstname=form.firstname.data,
lastname=form.lastname.data)
acc.get_password(form.password2.data)
db.session.add(acc)
db.session.commit()
return redirect(url_for('index'))
return render_template('user_registration.html', form=form)
# The route decorator must be outermost so that login_required actually wraps the view.
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
student = User.query.filter_by(username=form.username.data).first()
if student is None or not student.check_password(form.password.data):
flash('Not a username or incorrect password!')
return redirect(url_for('login'))
login_user(student, remember=form.rememberme.data)
return redirect(url_for('index'))
return render_template('login.html', title='Login Page', form=form)
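# Hedged sketch (comments only, not part of the original file) of the User model that
# these views assume from app.models: a flask_login UserMixin whose get_password()
# hashes-and-stores the submitted password and whose check_password() verifies a login
# attempt. The real definition may differ; the column sizes below are illustrative.
#
#     from flask_login import UserMixin
#     from werkzeug.security import generate_password_hash, check_password_hash
#
#     class User(UserMixin, db.Model):
#         id = db.Column(db.Integer, primary_key=True)
#         username = db.Column(db.String(64), unique=True, index=True)
#         email = db.Column(db.String(120), unique=True, index=True)
#         firstname = db.Column(db.String(64))
#         lastname = db.Column(db.String(64))
#         password_hash = db.Column(db.String(128))
#
#         def get_password(self, password):
#             self.password_hash = generate_password_hash(password)
#
#         def check_password(self, password):
#             return check_password_hash(self.password_hash, password)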
| 2.71875 | 3 |
codigo_Python_Raspberry/detect_status_plants.py | jonathanramirezislas/HidroTerra | 1 | 12792247 | import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Red color
low_red = np.array([161, 155, 84])
high_red = np.array([179, 255, 255])
red_mask = cv2.inRange(hsv_frame, low_red, high_red)
red = cv2.bitwise_and(frame, frame, mask=red_mask)
# Blue color
low_blue = np.array([94, 80, 2])
high_blue = np.array([126, 255, 255])
blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)
blue = cv2.bitwise_and(frame, frame, mask=blue_mask)
# Green color
low_green = np.array([25, 52, 72])
high_green = np.array([102, 255, 255])
green_mask = cv2.inRange(hsv_frame, low_green, high_green)
green = cv2.bitwise_and(frame, frame, mask=green_mask)
#yellow
low_yellow = np.array([21, 39, 64])
high_yellow = np.array([40, 255, 255])
yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow)
yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask)
# Every color except white
low = np.array([0, 42, 0])
high = np.array([179, 255, 255])
mask = cv2.inRange(hsv_frame, low, high)
result = cv2.bitwise_and(frame, frame, mask=mask)
#cv2.imshow("Frame", frame)
#cv2.imshow("Red", red)
#cv2.imshow("Blue", blue)
cv2.imshow("Green", green)
cv2.imshow("Yellow", yellow)
#cv2.imshow("Result", result)
    key = cv2.waitKey(1)
    if key == 27:  # Esc key exits the preview loop
        break
# Release the camera and close all preview windows before exiting.
cap.release()
cv2.destroyAllWindows()
| 2.8125 | 3 |
src/articles/utils.py | robzzy/articles-service | 0 | 12792248 | <reponame>robzzy/articles-service<gh_stars>0
# -*- coding: utf-8 -*-
from datetime import datetime
def utcnow():
return datetime.utcnow()
| 1.992188 | 2 |
openstack_registration/views.py | AMfalme/Horizon_Openstack | 0 | 12792249 | from django.shortcuts import render
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django import shortcuts
from django.conf import settings
from . import forms
import requests
import json
from django.http import Http404
from django.shortcuts import redirect
from django.contrib import messages
@sensitive_post_parameters()
@csrf_protect
@never_cache
def signup(request):
if request.user.is_authenticated():
return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
if request.method == 'POST':
form = forms.UserCreationForm(request.POST)
if form.is_valid():
messages.success(request, 'Successfully created account. Please check your email to verify your account before logging in.')
return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
else:
form = forms.UserCreationForm()
return render(request, 'registration/signup.html', {'form': form})
def verify(request):
if request.GET.get('token') is not None:
payload = {'token': request.GET.get('token')}
headers = {'Content-type': 'application/json'}
res = requests.post(settings.EMAIL_VERIFICATION_URL, data=json.dumps(payload), headers=headers)
if res.status_code == 200:
messages.success(request, 'Email Address Verified! Please log in.')
return redirect('/auth/login')
raise Http404()
def reset_password(request):
if request.user.is_authenticated():
return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
if request.method == 'POST':
form = forms.UserResetPasswordForm(request.POST)
if form.is_valid():
messages.success(request, "We have sent a password reset link. Please check your email.")
return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
else:
form = forms.UserResetPasswordForm()
return render(request, 'registration/reset_password.html', {'form': form})
@sensitive_post_parameters()
@csrf_protect
@never_cache
def password_update(request):
if request.user.is_authenticated():
return shortcuts.redirect(settings.LOGIN_REDIRECT_URL)
if request.method == 'POST':
form = forms.UserPasswordUpdateForm(request.POST)
if form.is_valid():
token = request.path.rsplit('/', 1)[-1]
password = form.cleaned_data['<PASSWORD>']
payload = {'token': token, 'password': password}
headers = {'Content-type': 'application/json'}
res = requests.post(settings.PASSWORD_UPDATE_URL, data=json.dumps(payload), headers=headers)
if res.status_code == 200:
messages.success(request, "Password updated successfully. Please log in.")
else:
messages.error(request, "That reset link does not exist or has expired. Please request a new reset password link by going to the reset password page.")
return redirect('/auth/login')
else:
form = forms.UserPasswordUpdateForm()
return render(request, 'registration/password_update.html', {'form': form}) | 2.0625 | 2 |
setup.py | samir-joshi/tmtoolkit | 167 | 12792250 | <filename>setup.py<gh_stars>100-1000
"""
tmtoolkit setuptools based setup module
"""
import os
from codecs import open
from setuptools import setup, find_packages
__title__ = 'tmtoolkit'
__version__ = '0.10.0'
__author__ = '<NAME>'
__license__ = 'Apache License 2.0'
GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/tmtoolkit'
DEPS_BASE = ['numpy>=1.19.0,<2', 'scipy>=1.5.0,<1.6', 'pandas>=1.1.0,<1.2', 'xlrd>=1.2.0',
'globre>=0.1.5,<0.2', 'matplotlib>=3.3.0,<3.4', 'spacy>=2.3.0,<2.4']
DEPS_EXTRA = {
'datatable': ['datatable>=0.10.0,<0.11'],
'nltk': ['nltk>=3.5.0,<3.6'],
'excel_export': ['openpyxl>=3.0.0'],
'wordclouds': ['wordcloud>=1.7.0,<1.8', 'Pillow>=7.2.0,<7.3'],
'lda': ['ldafork>=1.2.0,<1.3'],
'sklearn': ['scikit-learn>=0.23,<0.24'],
'gensim': ['gensim>=3.8.0,<3.9'],
'topic_modeling_eval_extra': ['gmpy2>=2.0.0,<3'],
    'test': ['pytest>=6.0.0,<7', 'hypothesis>=5.23.0,<5.24', 'decorator>=4.4.0,<4.5'],
'doc': ['Sphinx>=3.1.0', 'sphinx-rtd-theme>=0.5.0', 'nbsphinx>=0.7.0'],
'dev': ['coverage>=5.2', 'coverage-badge>=1.0.0', 'pytest-cov>=2.10.0', 'twine>=3.2.0',
'ipython>=7.16.0', 'jupyter>=1.0.0', 'notebook>=6.0.0', 'tox>=3.18.0'],
}
DEPS_EXTRA['recommended'] = DEPS_EXTRA['excel_export'] + DEPS_EXTRA['wordclouds']
DEPS_EXTRA['all'] = []
for k, deps in DEPS_EXTRA.items():
if k not in {'recommended', 'all'}:
DEPS_EXTRA['all'].extend(deps)
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=__title__,
version=__version__,
description='Text Mining and Topic Modeling Toolkit',
long_description=long_description,
long_description_content_type='text/x-rst',
url=GITHUB_URL,
project_urls={
'Bug Reports': GITHUB_URL + '/issues',
'Source': GITHUB_URL,
},
author=__author__,
author_email='<EMAIL>',
license=__license__,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
keywords='textmining textanalysis text mining analysis preprocessing topicmodeling topic modeling evaluation',
packages=find_packages(exclude=['tests', 'examples']),
include_package_data=True,
python_requires='>=3.6',
install_requires=DEPS_BASE,
extras_require=DEPS_EXTRA
)
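# Usage note (not part of the original file): the DEPS_EXTRA groups above become pip
# extras, e.g. `pip install tmtoolkit[recommended]` for Excel export plus wordclouds,
# or `pip install tmtoolkit[all]` to pull in every optional dependency group.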
| 1.367188 | 1 |