hexsha (stringlengths 40..40) | size (int64 5..2.06M) | ext (stringclasses 11 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..251) | max_stars_repo_name (stringlengths 4..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..251) | max_issues_repo_name (stringlengths 4..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..251) | max_forks_repo_name (stringlengths 4..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 1..1.05M) | avg_line_length (float64 1..1.02M) | max_line_length (int64 3..1.04M) | alphanum_fraction (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
167f92f56a42d5741ea4dde46075bf065ebbe3cd | 11,512 | py | Python | Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | c109f8cec3a81c732f335cd39752da6ae573b604 | ["Apache-2.0"] | 532 | 2015-03-13T18:51:10.000Z | 2022-03-27T08:08:29.000Z | Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | c109f8cec3a81c732f335cd39752da6ae573b604 | ["Apache-2.0"] | 2,701 | 2015-01-03T21:33:34.000Z | 2022-03-30T07:13:41.000Z | Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | c109f8cec3a81c732f335cd39752da6ae573b604 | ["Apache-2.0"] | 271 | 2015-02-16T23:25:29.000Z | 2022-03-30T20:12:17.000Z |
# -------------------------------------------------------------------------- #
# OpenSim Moco: examplePredictAndTrack.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2018 Stanford University and the Authors #
# #
# Author(s): Christopher Dembia #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import os
import math
import opensim as osim
"""
This file solves the following problems using a
double pendulum model:
1. predict an optimal trajectory (and controls),
2. track the states from the optimal trajectory, and
3. track the marker trajectories from the optimal trajectory.
"""
visualize = True
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') == '0':
visualize = False
# Create a model of a double pendulum.
# ------------------------------------
optimalTrajectory = solvePrediction()
markersRef = computeMarkersReference(optimalTrajectory)
trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())
trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
| 34.160237 | 79 | 0.633687 |
1680693e61459262ca19480a0c2453b04b05a5a0 | 992 | py | Python | StorageSystem.py | aaronFritz2302/ZoomAuto | 41af90dc35104bfea970b6b61694e105a625535c | ["MIT"] | null | null | null | StorageSystem.py | aaronFritz2302/ZoomAuto | 41af90dc35104bfea970b6b61694e105a625535c | ["MIT"] | null | null | null | StorageSystem.py | aaronFritz2302/ZoomAuto | 41af90dc35104bfea970b6b61694e105a625535c | ["MIT"] | null | null | null |
import sqlite3
from pandas import DataFrame
conn = sqlite3.connect('./data.db',check_same_thread=False)
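# Hypothetical sketch of helpers this module might expose (the repo's own methods
# are truncated out of this row): persist and reload a DataFrame of meeting entries
# through the SQLite connection opened above.
def save_table(df: DataFrame, table: str = 'meetings') -> None:
    df.to_sql(table, conn, if_exists='replace', index=False)

def load_table(table: str = 'meetings') -> DataFrame:
    import pandas as pd
    return pd.read_sql_query(f'SELECT * FROM {table}', conn)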
| 32 | 138 | 0.582661 |
1680b6fe6e7e3043a7d70ac1ab9bfc138b53e7ea | 5,255 | py | Python | pymapd/_parsers.py | mflaxman10/pymapd | 00b72ae399a0ff829507ee0b3a2b7404f3a06c26 | ["Apache-2.0"] | null | null | null | pymapd/_parsers.py | mflaxman10/pymapd | 00b72ae399a0ff829507ee0b3a2b7404f3a06c26 | ["Apache-2.0"] | null | null | null | pymapd/_parsers.py | mflaxman10/pymapd | 00b72ae399a0ff829507ee0b3a2b7404f3a06c26 | ["Apache-2.0"] | null | null | null |
"""
Utility methods for parsing data returned from MapD
"""
import datetime
from collections import namedtuple
from sqlalchemy import text
import mapd.ttypes as T
from ._utils import seconds_to_time
Description = namedtuple("Description", ["name", "type_code", "display_size",
"internal_size", "precision", "scale",
"null_ok"])
ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable",
"precision", "scale",
"comp_param"])
_typeattr = {
'SMALLINT': 'int',
'INT': 'int',
'BIGINT': 'int',
'TIME': 'int',
'TIMESTAMP': 'int',
'DATE': 'int',
'BOOL': 'int',
'FLOAT': 'real',
'DECIMAL': 'real',
'DOUBLE': 'real',
'STR': 'str',
}
_thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES
_thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES
def _extract_description(row_desc):
# type: (List[T.TColumnType]) -> List[Description]
"""
Return a tuple of (name, type_code, display_size, internal_size,
precision, scale, null_ok)
https://www.python.org/dev/peps/pep-0249/#description
"""
return [Description(col.col_name, col.col_type.type,
None, None, None, None,
col.col_type.nullable)
for col in row_desc]
def _load_schema(buf):
"""
Load a `pyarrow.Schema` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
Returns
-------
schema : pyarrow.Schema
"""
import pyarrow as pa
reader = pa.RecordBatchStreamReader(buf)
return reader.schema
def _load_data(buf, schema):
"""
Load a `pandas.DataFrame` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
schema : pyarrow.Schema
Returns
-------
df : pandas.DataFrame
"""
import pyarrow as pa
message = pa.read_message(buf)
rb = pa.read_record_batch(message, schema)
return rb.to_pandas()
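# Hypothetical convenience wrapper tying the two loaders above together (name and
# call pattern assumed for illustration): read the Arrow schema from one shared
# memory buffer, then decode the record batch from a second buffer.
def _load_schema_and_data(schema_buf, data_buf):
    schema = _load_schema(schema_buf)
    return _load_data(data_buf, schema)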
def _parse_tdf_gpu(tdf):
"""
Parse the results of a select ipc_gpu into a GpuDataFrame
Parameters
----------
tdf : TDataFrame
Returns
-------
gdf : GpuDataFrame
"""
import numpy as np
from pygdf.gpuarrow import GpuArrowReader
from pygdf.dataframe import DataFrame
from numba import cuda
from numba.cuda.cudadrv import drvapi
from .shm import load_buffer
ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle)
ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size)
ctx = cuda.current_context()
dptr = ipch.open(ctx)
schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size)
# TODO: extra copy.
schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8)
dtype = np.dtype(np.byte)
darr = cuda.devicearray.DeviceNDArray(shape=dptr.size,
strides=dtype.itemsize,
dtype=dtype,
gpu_data=dptr)
reader = GpuArrowReader(schema_buffer, darr)
df = DataFrame()
for k, v in reader.to_dict().items():
df[k] = v
return df
| 27.952128 | 79 | 0.597146 |
16833799777639519b435db61702159dbc70cb57 | 20,687 | py | Python | featuretools/entityset/entity.py | rohit901/featuretools | 20bee224782acf94909c2bf33239fd5332a8c1de | ["BSD-3-Clause"] | 1 | 2021-07-30T16:03:48.000Z | 2021-07-30T16:03:48.000Z | featuretools/entityset/entity.py | rohit901/featuretools | 20bee224782acf94909c2bf33239fd5332a8c1de | ["BSD-3-Clause"] | 13 | 2021-03-04T19:29:21.000Z | 2022-01-21T10:49:20.000Z | featuretools/entityset/entity.py | rohit901/featuretools | 20bee224782acf94909c2bf33239fd5332a8c1de | ["BSD-3-Clause"] | 2 | 2021-02-09T21:37:48.000Z | 2021-12-22T16:13:27.000Z |
import logging
import warnings
import dask.dataframe as dd
import numpy as np
import pandas as pd
from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
col_is_datetime,
convert_all_variable_data,
convert_variable_data,
get_linked_vars,
infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _dataframes_equal
from featuretools.variable_types import Text, find_variable_types
ks = import_or_none('databricks.koalas')
logger = logging.getLogger('featuretools.entityset')
_numeric_types = vtypes.PandasTypes._pandas_numerics
_categorical_types = [vtypes.PandasTypes._categorical]
_datetime_types = vtypes.PandasTypes._pandas_datetimes
def _get_variable(self, variable_id):
"""Get variable instance
Args:
variable_id (str) : Id of variable to get.
Returns:
:class:`.Variable` : Instance of variable.
Raises:
KeyError : if no variable exists with the provided id
"""
for v in self.variables:
if v.id == variable_id:
return v
raise KeyError("Variable: %s not found in entity" % (variable_id))
def convert_variable_type(self, variable_id, new_type,
convert_data=True,
**kwargs):
"""Convert variable in dataframe to different type
Args:
variable_id (str) : Id of variable to convert.
new_type (subclass of `Variable`) : Type of variable to convert to.
entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.
convert_data (bool) : If True, convert underlying data in the EntitySet.
Raises:
RuntimeError : Raises if it cannot convert the underlying data
Examples:
>>> from featuretools.tests.testing_utils import make_ecommerce_entityset
>>> es = make_ecommerce_entityset()
>>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
"""
if convert_data:
# first, convert the underlying data (or at least try to)
self.df = convert_variable_data(df=self.df,
column_id=variable_id,
new_type=new_type,
**kwargs)
# replace the old variable with the new one, maintaining order
variable = self._get_variable(variable_id)
new_variable = new_type.create_from(variable)
self.variables[self.variables.index(variable)] = new_variable
def _create_variables(self, variable_types, index, time_index, secondary_time_index):
"""Extracts the variables from a dataframe
Args:
variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of index column
time_index (str or None): Name of time_index column
secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
that each map to a list of columns that depend on that secondary time
"""
variables = []
variable_types = variable_types.copy() or {}
string_to_class_map = find_variable_types()
# TODO: Remove once Text has been removed from variable types
string_to_class_map[Text.type_string] = Text
for vid in variable_types.copy():
vtype = variable_types[vid]
if isinstance(vtype, str):
if vtype in string_to_class_map:
variable_types[vid] = string_to_class_map[vtype]
else:
variable_types[vid] = string_to_class_map['unknown']
warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype))
if index not in variable_types:
variable_types[index] = vtypes.Index
link_vars = get_linked_vars(self)
inferred_variable_types = infer_variable_types(self.df,
link_vars,
variable_types,
time_index,
secondary_time_index)
inferred_variable_types.update(variable_types)
for v in inferred_variable_types:
# TODO document how vtype can be tuple
vtype = inferred_variable_types[v]
if isinstance(vtype, tuple):
# vtype is (ft.Variable, dict_of_kwargs)
_v = vtype[0](v, self, **vtype[1])
else:
_v = inferred_variable_types[v](v, self)
variables += [_v]
# convert data once we've inferred
self.df = convert_all_variable_data(df=self.df,
variable_types=inferred_variable_types)
# make sure index is at the beginning
index_variable = [v for v in variables
if v.id == index][0]
self.variables = [index_variable] + [v for v in variables
if v.id != index]
def update_data(self, df, already_sorted=False,
recalculate_last_time_indexes=True):
'''Update entity's internal dataframe, optionally making sure data is sorted,
reference indexes to other entities are consistent, and last_time_indexes
are consistent.
'''
if len(df.columns) != len(self.variables):
raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns),
len(self.variables)))
for v in self.variables:
if v.id not in df.columns:
raise ValueError("Updated dataframe is missing new {} column".format(v.id))
# Make sure column ordering matches variable ordering
self.df = df[[v.id for v in self.variables]]
self.set_index(self.index)
if self.time_index is not None:
self.set_time_index(self.time_index, already_sorted=already_sorted)
self.set_secondary_time_index(self.secondary_time_index)
if recalculate_last_time_indexes and self.last_time_index is not None:
self.entityset.add_last_time_indexes(updated_entities=[self.id])
self.entityset.reset_data_description()
def add_interesting_values(self, max_values=5, verbose=False):
"""
Find interesting values for categorical variables, to be used to
generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for variable in self.variables:
# some heuristics to find basic 'where'-able variables
if isinstance(variable, vtypes.Discrete):
variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype)
# TODO - consider removing this constraint
# don't add interesting values for entities in relationships
skip = False
for r in self.entityset.relationships:
if variable in [r.child_variable, r.parent_variable]:
skip = True
break
if skip:
continue
counts = self.df[variable.id].value_counts()
# find how many of each unique value there are; sort by count,
# and add interesting values to each variable
total_count = np.sum(counts)
counts[:] = counts.sort_values()[::-1]
for i in range(min(max_values, len(counts.index))):
idx = counts.index[i]
# add the value to interesting_values if it represents more than
# 25% of the values we have not seen so far
if len(counts.index) < 25:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
else:
fraction = counts[idx] / total_count
if fraction > 0.05 and fraction < 0.95:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
# total_count -= counts[idx]
else:
break
self.entityset.reset_data_description()
def delete_variables(self, variable_ids):
"""
Remove variables from entity's dataframe and from
self.variables
Args:
variable_ids (list[str]): Variables to delete
Returns:
None
"""
# check if variable is not a list
if not isinstance(variable_ids, list):
raise TypeError('variable_ids must be a list of variable names')
if len(variable_ids) == 0:
return
self.df = self.df.drop(variable_ids, axis=1)
for v_id in variable_ids:
v = self._get_variable(v_id)
self.variables.remove(v)
def set_index(self, variable_id, unique=True):
"""
Args:
variable_id (string) : Name of an existing variable to set as index.
unique (bool) : Whether to assert that the index is unique.
"""
if isinstance(self.df, pd.DataFrame):
self.df = self.df.set_index(self.df[variable_id], drop=False)
self.df.index.name = None
if unique:
assert self.df.index.is_unique, "Index is not unique on dataframe " \
"(Entity {})".format(self.id)
self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
self.index = variable_id
def _create_index(index, make_index, df):
'''Handles index creation logic based on user input'''
created_index = None
if index is None:
# Case 1: user wanted to make index but did not specify column name
assert not make_index, "Must specify an index name if make_index is True"
# Case 2: make_index not specified but no index supplied, use first column
warnings.warn(("Using first column as index. "
"To change this, specify the index parameter"))
index = df.columns[0]
elif make_index and index in df.columns:
# Case 3: user wanted to make index but column already exists
raise RuntimeError("Cannot make index: index variable already present")
elif index not in df.columns:
if not make_index:
# Case 4: user names index, it is not in df. does not specify
# make_index. Make new index column and warn
warnings.warn("index {} not found in dataframe, creating new "
"integer column".format(index))
# Case 5: make_index with no errors or warnings
# (Case 4 also uses this code path)
if isinstance(df, dd.DataFrame):
df[index] = 1
df[index] = df[index].cumsum() - 1
elif is_instance(df, ks, 'DataFrame'):
df = df.koalas.attach_id_column('distributed-sequence', index)
else:
df.insert(0, index, range(len(df)))
created_index = index
# Case 6: user specified index, which is already in df. No action needed.
return created_index, index, df
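def _example_create_index():
    # Hypothetical quick check of the make_index path above (illustration only,
    # not part of featuretools): a plain pandas frame gains an integer "id"
    # column counting 0..n-1, which is returned as the created index.
    example = pd.DataFrame({"value": [10, 20, 30]})
    created, index, example = _create_index(index="id", make_index=True, df=example)
    assert created == "id" and list(example["id"]) == [0, 1, 2]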
def _validate_entity_params(id, df, time_index):
'''Validation checks for Entity inputs'''
assert isinstance(id, str), "Entity id must be a string"
assert len(df.columns) == len(set(df.columns)), "Duplicate column names"
for c in df.columns:
if not isinstance(c, str):
raise ValueError("All column names must be strings (Column {} "
"is not a string)".format(c))
if time_index is not None and time_index not in df.columns:
raise LookupError('Time index not found in dataframe')
| 42.391393 | 124 | 0.592353 |
16848dd03e02c952cce813e4092be02064f38ca9 | 1,470 | py | Python | githubdl/url_helpers.py | wilvk/githubdl | 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920 | ["MIT"] | 16 | 2018-06-20T00:01:40.000Z | 2022-01-24T08:16:17.000Z | githubdl/url_helpers.py | wilvk/githubdl | 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920 | ["MIT"] | 12 | 2018-07-18T21:09:37.000Z | 2020-03-28T23:38:13.000Z | githubdl/url_helpers.py | wilvk/githubdl | 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920 | ["MIT"] | null | null | null |
import re
from urllib.parse import urlparse
import logging
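# Hypothetical sketch of the kind of URL helper this module provides (the repo's
# own functions are truncated out of this row); it only uses the imports above.
def check_url_is_http(repo_url: str) -> bool:
    return urlparse(repo_url).scheme in ('http', 'https')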
| 31.276596 | 76 | 0.706803 |
168519bcca14cbc5945efcceae792622fe09d3d9 | 25,777 | py | Python | RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | ["Apache-2.0"] | null | null | null | RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | ["Apache-2.0"] | null | null | null | RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | ["Apache-2.0"] | 1 | 2021-05-08T02:23:00.000Z | 2021-05-08T02:23:00.000Z |
import itertools
from ez_lib import ez_flow_tool
from collections import defaultdict
from ez_scheduler import EzScheduler
from ez_lib.ez_ob import CenUpdateInfo, UpdateNext
from misc import constants, logger
from domain.message import *
from collections import deque
from misc import global_vars
import time
import eventlet
mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL)
| 54.039832 | 136 | 0.58141 |
16863f0872927e8b824cd132c78fbf22829a951a | 892 | py | Python | src/trackbar.py | clovadev/opencv-python | f9c685f8dc658f630a9742f4dd55663bde03fe7d | ["MIT"] | null | null | null | src/trackbar.py | clovadev/opencv-python | f9c685f8dc658f630a9742f4dd55663bde03fe7d | ["MIT"] | null | null | null | src/trackbar.py | clovadev/opencv-python | f9c685f8dc658f630a9742f4dd55663bde03fe7d | ["MIT"] | null | null | null |
import numpy as np
import cv2 as cv
# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')
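# The original file's trackbar callback was truncated out of this row; OpenCV
# requires one, and a no-op is enough because positions are read in the loop below.
def nothing(x):
    pass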
# create trackbars for color change
cv.createTrackbar('R', 'image', 0, 255, nothing)
cv.createTrackbar('G', 'image', 0, 255, nothing)
cv.createTrackbar('B', 'image', 0, 255, nothing)
# create switch for ON/OFF functionality
switch = 'OFF/ON'
cv.createTrackbar(switch, 'image', 0, 1, nothing)
while True:
# get current positions of four trackbars
r = cv.getTrackbarPos('R', 'image')
g = cv.getTrackbarPos('G', 'image')
b = cv.getTrackbarPos('B', 'image')
s = cv.getTrackbarPos(switch, 'image')
    # Black out the image while the switch is off; otherwise fill it with the selected color
if s == 0:
img[:] = 0
else:
img[:] = [b, g, r]
    # Show the image and exit when any key is pressed
cv.imshow('image', img)
if cv.waitKey(10) > 0:
break
cv.destroyAllWindows()
| 22.3 | 49 | 0.618834 |
1687efc3eb23ad09ae90d5260997fa4ec210ea9f | 1,246 | py | Python | aoc_2015/src/day20.py | ambertests/adventofcode | 140ed1d71ed647d30d1e6572964cab1e89dfd105 | ["MIT"] | null | null | null | aoc_2015/src/day20.py | ambertests/adventofcode | 140ed1d71ed647d30d1e6572964cab1e89dfd105 | ["MIT"] | null | null | null | aoc_2015/src/day20.py | ambertests/adventofcode | 140ed1d71ed647d30d1e6572964cab1e89dfd105 | ["MIT"] | null | null | null |
from functools import reduce
# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
# takes around 20s
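# A minimal sketch of the factor-pair idea from the linked answer (hypothetical
# helper for illustration; the repo's actual solve() is truncated out of this row):
# walk candidate divisors up to sqrt(n) and keep both members of each pair.
def factors(n):
    return set(reduce(list.__add__,
                      ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))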
pt1, pt2 = solve(29000000)
print("Part 1:", pt1)
print("Part 2:", pt2)
| 27.688889 | 125 | 0.50321 |
1688724e3867c7e8e39adb6579cee704e885e634 | 1,604 | py | Python | setup.py | jean/labels | dcb6f40fb4e222068e302202dd5d7d98b4771e4b | ["MIT"] | 1 | 2019-11-06T14:08:40.000Z | 2019-11-06T14:08:40.000Z | setup.py | jean/labels | dcb6f40fb4e222068e302202dd5d7d98b4771e4b | ["MIT"] | null | null | null | setup.py | jean/labels | dcb6f40fb4e222068e302202dd5d7d98b4771e4b | ["MIT"] | null | null | null |
import pathlib
import setuptools
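# Hypothetical minimal helper (the repo's own read() is truncated out of this row)
# so that long_description=read("README.md") below resolves; it reads a file that
# sits next to this setup.py.
def read(file_name: str) -> str:
    return pathlib.Path(__file__).parent.joinpath(file_name).read_text(encoding="utf-8")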
setuptools.setup(
name="labels",
version="0.3.0.dev0",
author="Raphael Pierzina",
author_email="[email protected]",
maintainer="Raphael Pierzina",
maintainer_email="[email protected]",
license="MIT",
url="https://github.com/hackebrot/labels",
project_urls={
"Repository": "https://github.com/hackebrot/labels",
"Issues": "https://github.com/hackebrot/labels/issues",
},
description="CLI app for managing GitHub labels for Python 3.6 and newer. ",
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
install_requires=["click", "requests", "pytoml", "attrs"],
entry_points={"console_scripts": ["labels = labels.cli:labels"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
keywords=["github", "command-line"],
)
| 34.12766 | 81 | 0.640898 |
168883ce786ac5e2bf642b55446a3bcf835eeaa8 | 275 | py | Python | colab/__init__.py | caseywstark/colab | e05293e45a657eda19d733bf05624a1613a7a9b7 | ["MIT"] | 1 | 2015-11-05T11:49:32.000Z | 2015-11-05T11:49:32.000Z | colab/__init__.py | caseywstark/colab | e05293e45a657eda19d733bf05624a1613a7a9b7 | ["MIT"] | null | null | null | colab/__init__.py | caseywstark/colab | e05293e45a657eda19d733bf05624a1613a7a9b7 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
__about__ = """
This project demonstrates a social networking site. It provides profiles,
friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps,
locations and user-to-user messaging.
In 0.5 this was called "complete_project".
"""
| 27.5 | 74 | 0.705455 |
1689397a49d0387c8d71492ecee794b05a45ba83 | 862 | py | Python | src/ralph/ui/forms/util.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | ["Apache-2.0"] | null | null | null | src/ralph/ui/forms/util.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | ["Apache-2.0"] | null | null | null | src/ralph/ui/forms/util.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.business.models import Venture, VentureRole
| 28.733333 | 78 | 0.558005 |
1689e31b5f0f44d60b97128a67d87b2730238b68 | 28 | py | Python | tests/syntax/missing_in_with_for.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | ["MIT"] | 287 | 2019-04-08T13:18:29.000Z | 2021-03-14T19:10:21.000Z | tests/syntax/missing_in_with_for.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | ["MIT"] | 191 | 2019-04-08T14:39:18.000Z | 2021-03-14T22:14:56.000Z | tests/syntax/missing_in_with_for.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | ["MIT"] | 9 | 2019-04-08T12:54:08.000Z | 2020-11-20T02:26:27.000Z |
for x range(4):
print(x)
| 9.333333 | 15 | 0.571429 |
168b7cd601c412154d052fac8164eeb139aec911 | 4,769 | py | Python | services/users/manage.py | eventprotocol/event-protocol-webapp | 38ccdc63bc744576ebb3631b7e17cfd4a09216b6 | ["MIT"] | null | null | null | services/users/manage.py | eventprotocol/event-protocol-webapp | 38ccdc63bc744576ebb3631b7e17cfd4a09216b6 | ["MIT"] | 11 | 2020-09-05T14:16:23.000Z | 2022-03-03T22:33:14.000Z | services/users/manage.py | eventprotocol/event-protocol-webapp | 38ccdc63bc744576ebb3631b7e17cfd4a09216b6 | ["MIT"] | null | null | null |
"""
manage.py for flask application
"""
import unittest
import coverage
import os
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.models import User
# Code coverage
COV = coverage.Coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/config.py',
]
)
COV.start()
app = create_app()
cli = FlaskGroup(create_app=create_app)
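# Hypothetical examples of commands registered on the FlaskGroup above (the repo's
# own CLI commands are truncated out of this row): recreate the database and run
# the unittest suite discovered under project/tests.
@cli.command('recreate_db')
def recreate_db():
    db.drop_all()
    db.create_all()
    db.session.commit()

@cli.command('test')
def test():
    tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    return 0 if result.wasSuccessful() else 1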
if __name__ == '__main__':
cli()
| 29.621118 | 79 | 0.642063 |
168bb7123d253d48e67b56f36bbcad938db24dd7 | 1,750 | py | Python | keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | ["Apache-2.0"] | 3 | 2021-02-14T17:10:59.000Z | 2021-02-14T18:09:17.000Z | keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | ["Apache-2.0"] | null | null | null | keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | ["Apache-2.0"] | null | null | null |
import os
from keras.callbacks import ModelCheckpoint
from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC
from keras_transformer.utils.io_utils import save_to_pickle
| 35 | 117 | 0.671429 |
168c810ecd449bb3eb263646cbc454470f8c28e4 | 527 | py | Python | train_test_val.py | arashk7/Yolo5_Dataset_Generator | aeba58b51201b8521478c777b40c4d31f0c60be9 | ["Apache-2.0"] | null | null | null | train_test_val.py | arashk7/Yolo5_Dataset_Generator | aeba58b51201b8521478c777b40c4d31f0c60be9 | ["Apache-2.0"] | null | null | null | train_test_val.py | arashk7/Yolo5_Dataset_Generator | aeba58b51201b8521478c777b40c4d31f0c60be9 | ["Apache-2.0"] | null | null | null |
import os
import shutil
input_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5'
output_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5\ZhitangYolo5'
in_img_dir = os.path.join(input_dir, 'Images')
in_label_dir = os.path.join(input_dir, 'Labels')
out_img_dir = os.path.join(output_dir, 'images')
out_label_dir = os.path.join(output_dir, 'labels')
splits = {'train','test','valid'}
files = os.listdir(in_img_dir)
count = len(files)
for f in files:
print(f)
    # The files listed above live in in_img_dir, and dst was never assigned in the
    # original, so both are filled in here with the most likely intent.
    src = os.path.join(in_img_dir, f)
    dst = os.path.join(out_img_dir, f)
    shutil.copyfile(src, dst)
| 22.913043 | 68 | 0.736243 |
168cde4a792e9985c473078c1d3e1678761198e7 | 4,873 | py | Python | homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | ["Apache-2.0"] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | ["Apache-2.0"] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | ["Apache-2.0"] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z |
"""
Support for controlling projector via the PJLink protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pjlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pypjlink2==1.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_ENCODING = 'encoding'
DEFAULT_PORT = 4352
DEFAULT_ENCODING = 'utf-8'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
})
SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PJLink platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
encoding = config.get(CONF_ENCODING)
password = config.get(CONF_PASSWORD)
if 'pjlink' not in hass.data:
hass.data['pjlink'] = {}
hass_data = hass.data['pjlink']
device_label = "{}:{}".format(host, port)
if device_label in hass_data:
return
device = PjLinkDevice(host, port, name, encoding, password)
hass_data[device_label] = device
add_entities([device], True)
def format_input_source(input_source_name, input_source_number):
"""Format input source for display in UI."""
return "{} {}".format(input_source_name, input_source_number)
def turn_on(self):
"""Turn projector on."""
with self.projector() as projector:
projector.set_power('on')
def mute_volume(self, mute):
"""Mute (true) of unmute (false) media player."""
with self.projector() as projector:
from pypjlink import MUTE_AUDIO
projector.set_mute(MUTE_AUDIO, mute)
def select_source(self, source):
"""Set the input source."""
source = self._source_name_mapping[source]
with self.projector() as projector:
projector.set_input(*source)
| 31.038217 | 78 | 0.65668 |
168da4e09bd5b50aa5b8cd08e50f215c17b399b2 | 608 | py | Python | leetcode/regex_matching.py | Kaushalya/algo_journal | bcea8afda0dc86b36452378e3bcff9b0f57d6856 | ["Apache-2.0"] | null | null | null | leetcode/regex_matching.py | Kaushalya/algo_journal | bcea8afda0dc86b36452378e3bcff9b0f57d6856 | ["Apache-2.0"] | null | null | null | leetcode/regex_matching.py | Kaushalya/algo_journal | bcea8afda0dc86b36452378e3bcff9b0f57d6856 | ["Apache-2.0"] | null | null | null |
# Level: Hard
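# Hypothetical sketch of the missing matcher (the repo's own solution is truncated
# out of this row): classic top-down DP over (i, j) positions supporting '.' and '*'.
from functools import lru_cache

def isMatch(s, p):
    @lru_cache(maxsize=None)
    def match(i, j):
        if j == len(p):
            return i == len(s)
        first = i < len(s) and p[j] in (s[i], '.')
        if j + 1 < len(p) and p[j + 1] == '*':
            return match(i, j + 2) or (first and match(i + 1, j))
        return first and match(i + 1, j + 1)
    return match(0, 0)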
if __name__ == "__main__":
ss = 'abbbbbc'
p = 'a*'
print(isMatch(ss, p))
| 17.882353 | 39 | 0.361842 |
168db9c8444379377b3a611c0a5f87f083f3ec4d | 3,217 | py | Python | tests/factories.py | luzik/waliki | b7db696075ceebb5676be61f44e2d806cc472255 | ["BSD-3-Clause"] | 324 | 2015-01-02T20:48:33.000Z | 2021-12-11T14:44:34.000Z | tests/factories.py | luzik/waliki | b7db696075ceebb5676be61f44e2d806cc472255 | ["BSD-3-Clause"] | 103 | 2015-01-02T03:01:34.000Z | 2020-04-02T19:03:53.000Z | tests/factories.py | luzik/waliki | b7db696075ceebb5676be61f44e2d806cc472255 | ["BSD-3-Clause"] | 84 | 2015-01-07T08:53:05.000Z | 2021-01-04T00:26:38.000Z |
import factory
from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect
| 28.723214 | 98 | 0.608642 |
168dc722af15d363851566ae2eeabcf9ccc50653 | 68,372 | py | Python | nxt_editor/commands.py | dalteocraft/nxt_editor | 18992da7cfa89769568434ec08d787510e09f1c4 | ["MIT"] | 131 | 2020-12-03T08:01:26.000Z | 2022-03-07T03:41:37.000Z | nxt_editor/commands.py | dalteocraft/nxt_editor | 18992da7cfa89769568434ec08d787510e09f1c4 | ["MIT"] | 127 | 2020-12-07T21:43:02.000Z | 2022-02-17T22:31:14.000Z | nxt_editor/commands.py | dalteocraft/nxt_editor | 18992da7cfa89769568434ec08d787510e09f1c4 | ["MIT"] | 17 | 2020-12-08T08:06:44.000Z | 2021-11-18T05:40:11.000Z |
# Built-in
import copy
import logging
import time
# External
from Qt.QtWidgets import QUndoCommand
# Internal
from nxt_editor import colors
from nxt_editor import user_dir
from nxt import nxt_path
from nxt.nxt_layer import LAYERS, SAVE_KEY
from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,
list_merger)
from nxt import nxt_io
from nxt import GRID_SIZE
import nxt_editor
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
class SetAttributeComment(SetNodeAttributeData):
"""Set attribute comment"""
class SetCompute(SetNodeAttributeValue):
"""Set node code value"""
class SetNodeComment(SetNodeAttributeValue):
"""Set node comment"""
class SetNodeInstance(SetNodeAttributeValue):
"""Set node instance"""
class SetNodeEnabledState(SetNodeAttributeValue):
"""Set node enabled state"""
class SetNodeCollapse(NxtCommand):
"""Set the node collapse state"""
def _add_node_hierarchy(base_node_path, model, layer):
stage = model.stage
comp_layer = model.comp_layer
new_node_paths = []
new_nodes = []
node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)
new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,
parent=None, layer=layer,
comp_layer=comp_layer)
for nn_p, n in new_node_table:
display_node = comp_layer.lookup(nn_p)
if display_node is not None:
display_child_order = getattr(display_node,
INTERNAL_ATTRS.CHILD_ORDER)
old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)
new_child_order = list_merger(display_child_order,
old_child_order)
setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)
new_node_paths += [nn_p]
new_nodes += [n]
return new_nodes, new_node_paths, dirty
def undo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug("Undo " + cmd.text() + " | " + update_time + "ms")
def redo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug(cmd.text() + " | " + update_time + "ms")
| 40.60095 | 80 | 0.597379 |
168de834f7c08dea94c1b268f9213453f995fc3e | 6,642 | py | Python | mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py | libracore/mietrechtspraxis | 7b2320a70b98b086be136a86b1ab4fadfce215ff | ["MIT"] | 1 | 2021-07-15T13:25:23.000Z | 2021-07-15T13:25:23.000Z | mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py | libracore/mietrechtspraxis | 7b2320a70b98b086be136a86b1ab4fadfce215ff | ["MIT"] | 1 | 2022-01-27T13:30:56.000Z | 2022-01-27T13:30:56.000Z | mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py | libracore/mietrechtspraxis | 7b2320a70b98b086be136a86b1ab4fadfce215ff | ["MIT"] | 2 | 2021-08-14T22:23:08.000Z | 2021-09-08T09:31:51.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from PyPDF2 import PdfFileWriter
from frappe.utils.file_manager import save_file
def _get_sb(**kwargs):
'''
call on [IP]/api/method/mietrechtspraxis.api.get_sb
Mandatory Parameter:
- token
- plz
'''
# check that token is present
try:
token = kwargs['token']
except:
# 400 Bad Request (Missing Token)
return raise_4xx(400, 'Bad Request', 'Token Required')
# check that token is correct
if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'):
# 401 Unauthorized (Invalid Token)
return raise_4xx(401, 'Unauthorized', 'Invalid Token')
# check that plz_city is present
try:
plz_city = kwargs['plz_city']
except:
# 400 Bad Request (Missing PLZ/City)
return raise_4xx(400, 'Bad Request', 'PLZ/City Required')
answer = []
# lookup for plz
city_results = frappe.db.sql("""
SELECT
`city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `pincode` = '{plz_city}'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) < 1:
# lookup for city
city_results = frappe.db.sql("""
SELECT
`city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `city` LIKE '%{plz_city}%'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) > 0:
for city in city_results:
data = {}
data['plz'] = city.plz
data['ort'] = city.city
data['gemeinde'] = city.municipality
data['bezirk'] = city.district
data['kanton'] = city.canton
data['allgemein'] = get_informations(city.canton)
data['schlichtungsbehoerde'] = frappe.db.sql("""
SELECT
`schlichtungsbehoerde`.`titel` AS `Titel`,
`schlichtungsbehoerde`.`telefon` AS `Telefon`,
                            `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`,
`schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`,
`schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`,
`schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`,
`schlichtungsbehoerde`.`homepage` AS `Homepage`
FROM `tabArbitration Authority` AS `schlichtungsbehoerde`
LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent`
WHERE `geminendentbl`.`municipality` = '{municipality}'
""".format(municipality=city.municipality), as_dict=True)
answer.append(data)
if len(answer) > 0:
return raise_200(answer)
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
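# Hypothetical helpers assumed by the whitelisted method above (the repo's own
# raise_200 / raise_4xx / get_informations are truncated out of this row): wrap the
# payload in a JSON-friendly dict and set the HTTP status code on the response.
def raise_4xx(code, title, message):
    frappe.local.response['http_status_code'] = code
    return {'status': {'code': code, 'title': title, 'message': message}}

def raise_200(answer):
    return {'status': {'code': 200, 'title': 'OK'}, 'data': answer}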
| 43.986755 | 187 | 0.483439 |
168eb7379683dd807fa4203db108dc8a9b170baa | 323 | py | Python | easysockets/client_socket.py | Matthias1590/EasySockets | 70d33a04e862b682b87bdf2103bcc1d7da06994e | ["MIT"] | 2 | 2022-01-10T12:25:45.000Z | 2022-01-15T08:01:32.000Z | easysockets/client_socket.py | Matthias1590/EasySockets | 70d33a04e862b682b87bdf2103bcc1d7da06994e | ["MIT"] | null | null | null | easysockets/client_socket.py | Matthias1590/EasySockets | 70d33a04e862b682b87bdf2103bcc1d7da06994e | ["MIT"] | null | null | null |
from .connection import Connection
import socket
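# Hypothetical sketch of a client socket built on the imports above (the repo's own
# ClientSocket is truncated out of this row; Connection is assumed to wrap a
# connected socket object).
class ClientSocket:
    def connect(self, host: str, port: int) -> "Connection":
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        return Connection(sock)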
| 24.846154 | 73 | 0.696594 |
168f0267685e958dd990feeae60a1086e6b78107 | 31,038 | py | Python | pxr/usd/usdGeom/testenv/testUsdGeomSchemata.py | yurivict/USD | 3b097e3ba8fabf1777a1256e241ea15df83f3065 | ["Apache-2.0"] | 1 | 2022-03-16T01:40:10.000Z | 2022-03-16T01:40:10.000Z | pxr/usd/usdGeom/testenv/testUsdGeomSchemata.py | yurivict/USD | 3b097e3ba8fabf1777a1256e241ea15df83f3065 | ["Apache-2.0"] | null | null | null | pxr/usd/usdGeom/testenv/testUsdGeomSchemata.py | yurivict/USD | 3b097e3ba8fabf1777a1256e241ea15df83f3065 | ["Apache-2.0"] | 1 | 2018-10-03T19:08:33.000Z | 2018-10-03T19:08:33.000Z |
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
# pylint: disable=map-builtin-not-iterating
import sys, unittest
from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf
if __name__ == "__main__":
unittest.main()
| 42.69326 | 100 | 0.63055 |
168fdf67ec71ebdf125bbe9b6f5c14dad854391f | 1,310 | py | Python | round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 242d522386f6af26db029232fcffb51004ff4c59 | ["MIT"] | null | null | null | round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 242d522386f6af26db029232fcffb51004ff4c59 | ["MIT"] | 5 | 2020-04-26T19:44:41.000Z | 2020-05-01T16:26:06.000Z | round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 242d522386f6af26db029232fcffb51004ff4c59 | ["MIT"] | null | null | null |
import pandas as pd
from datetime import timedelta
| 48.518519 | 116 | 0.636641 |
16904816a9bda6205128c0d91b67e3ab2be3d489 | 3,943 | py | Python | src/commands/locate_item.py | seisatsu/DennisMUD-ESP32 | b63d4b914c5e8d0f9714042997c64919b20be842 | ["MIT"] | 19 | 2018-10-02T03:58:46.000Z | 2021-04-09T13:09:23.000Z | commands/locate_item.py | seisatsu/Dennis | 8f1892f21beba6b21b4f7b9ba3062296bb1dc4b9 | ["MIT"] | 100 | 2018-09-22T22:54:35.000Z | 2021-04-16T17:46:34.000Z | src/commands/locate_item.py | seisatsu/DennisMUD-ESP32 | b63d4b914c5e8d0f9714042997c64919b20be842 | ["MIT"] | 1 | 2022-01-03T02:21:56.000Z | 2022-01-03T02:21:56.000Z |
#######################
# Dennis MUD #
# locate_item.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
NAME = "locate item"
CATEGORIES = ["items"]
ALIASES = ["find item"]
USAGE = "locate item <item_id>"
DESCRIPTION = """Find out what room the item <item_id> is in, or who is holding it.
You can only locate an item that you own.
Wizards can locate any item.
Ex. `locate item 4`"""
| 41.072917 | 116 | 0.633274 |
16904f40b9743948ab5dc6a0d2f55015295bc2fd | 2,787 | py | Python | modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | ["Apache-2.0"] | null | null | null | modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | ["Apache-2.0"] | null | null | null | modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | ["Apache-2.0"] | null | null | null |
import json
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as MSE, r2_score
import math
# with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f:
# data = json.load(f)
all_zones_df = pd.read_csv("../data/scsb_all_zones.csv")
zone_25_df = pd.read_csv("../data/scsb_zone_25.csv")
zone_26_df = pd.read_csv("../data/scsb_zone_26.csv")
zone_27_df = pd.read_csv("../data/scsb_zone_27.csv")
month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist']
month_labels = [x[0:3] for x in month_dependant_variables]
data = zone_26_df
xgb_results = []
rfr_results = []
dtr_results = []
# calculate monthly estimations for 3 models
for dependant_month in month_dependant_variables:
features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]]
X = features_df.drop([dependant_month], axis=1)
y = features_df.get(dependant_month)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
xgb = XGBRegressor(random_state=42)
xgb.fit(X_train, y_train)
xgb_results.append(xgb.predict(X))
rfr = RandomForestRegressor(random_state=42)
rfr.fit(X_train, y_train)
rfr_results.append(rfr.predict(X))
dtr = DecisionTreeRegressor(random_state=42)
dtr.fit(X_train, y_train)
dtr_results.append(dtr.predict(X))
# compare the outputs of scsb against the 3 models
for row_target_index in range(20):
xgb_row = []
rfr_row = []
dtr_row = []
for month in range(12):
xgb_row.append(xgb_results[month][row_target_index])
rfr_row.append(rfr_results[month][row_target_index])
dtr_row.append(dtr_results[month][row_target_index])
plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5)
plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5)
plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5)
plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5)
plt.legend(loc='best')
plt.xticks(month_dependant_variables, month_labels)
plt.xlabel('Month')
plt.ylabel('Monthly Distribution')
name = data['name'].iloc[row_target_index]
plt.title(name)
plt.savefig('../plots/{}.png'.format(name))
plt.show()
| 38.708333 | 161 | 0.734482 |
1690da2be65319bb6696ac8f2ce11540524171c2 | 14,922 | py | Python | src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | f1691c6f6137ad8b938e64cea4700c7011efb800 | ["CC0-1.0"] | null | null | null | src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | f1691c6f6137ad8b938e64cea4700c7011efb800 | ["CC0-1.0"] | null | null | null | src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | f1691c6f6137ad8b938e64cea4700c7011efb800 | ["CC0-1.0"] | null | null | null |
# Databricks notebook source
# MAGIC %md
# MAGIC # XGBoost training
# MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.
# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.)
# MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar.
# MAGIC
# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_
# COMMAND ----------
import mlflow
import databricks.automl_runtime
# Use MLflow to track experiments
mlflow.set_experiment("/Users/[email protected]/databricks_automl/label_news_articles_csv-2022_03_12-15_38")
target_col = "label"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load Data
# COMMAND ----------
from mlflow.tracking import MlflowClient
import os
import uuid
import shutil
import pandas as pd
# Create temp directory to download input data from MLflow
input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8])
os.makedirs(input_temp_dir)
# Download the artifact and read it into a pandas DataFrame
input_client = MlflowClient()
input_data_path = input_client.download_artifacts("c2dfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir)
df_loaded = pd.read_parquet(os.path.join(input_data_path, "training_data"))
# Delete the temp data
shutil.rmtree(input_temp_dir)
# Preview data
df_loaded.head(5)
# COMMAND ----------
df_loaded.head(1).to_dict()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Select supported columns
# MAGIC Select only the columns that are supported. This allows us to train a model that can predict on a dataset that has extra columns that are not used in training.
# MAGIC `[]` are dropped in the pipelines. See the Alerts tab of the AutoML Experiment page for details on why these columns are dropped.
# COMMAND ----------
from databricks.automl_runtime.sklearn.column_selector import ColumnSelector
supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"]
col_selector = ColumnSelector(supported_cols)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Preprocessors
# COMMAND ----------
transformers = []
# COMMAND ----------
# MAGIC %md
# MAGIC ### Categorical columns
# COMMAND ----------
# MAGIC %md
# MAGIC #### Low-cardinality categoricals
# MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding.
# MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of unique values in the input column.
# COMMAND ----------
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = OneHotEncoder(handle_unknown="ignore")
transformers.append(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"]))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Medium-cardinality categoricals
# MAGIC Convert each medium-cardinality categorical column into a numerical representation.
# MAGIC Each string column is hashed to 1024 float columns.
# MAGIC Each numeric column is imputed with zeros.
# COMMAND ----------
from sklearn.feature_extraction import FeatureHasher
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
for feature in ["text", "main_img_url"]:
hash_transformer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
(f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))])
transformers.append((f"{feature}_hasher", hash_transformer, [feature]))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Text features
# MAGIC Convert each feature to a fixed-length vector using TF-IDF vectorization. The length of the output
# MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams
# MAGIC where n is in the range [1, 2].
# COMMAND ----------
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
for col in {'type', 'author'}:
vectorizer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
# Reshape to 1D since SimpleImputer changes the shape of the input to 2D
("reshape", FunctionTransformer(np.reshape, kw_args={"newshape":-1})),
("tfidf", TfidfVectorizer(decode_error="ignore", ngram_range = (1, 2), max_features=1024))])
transformers.append((f"text_{col}", vectorizer, [col]))
# COMMAND ----------
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Feature standardization
# MAGIC Scale all feature columns to be centered around zero with unit variance.
# COMMAND ----------
from sklearn.preprocessing import StandardScaler
standardizer = StandardScaler()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)
# COMMAND ----------
df_loaded.columns
# COMMAND ----------
from sklearn.model_selection import train_test_split
split_X = df_loaded.drop([target_col], axis=1)
split_y = df_loaded[target_col]
# Split out train data
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)
# Split remaining data equally for validation and test
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below
# COMMAND ----------
from xgboost import XGBClassifier
help(XGBClassifier)
# COMMAND ----------
import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline
set_config(display="diagram")
xgbc_classifier = XGBClassifier(
colsample_bytree=0.7324555878929649,
learning_rate=0.007636627530856404,
max_depth=7,
min_child_weight=6,
n_estimators=106,
n_jobs=100,
subsample=0.6972187716458148,
verbosity=0,
random_state=799811440,
)
model = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
("classifier", xgbc_classifier),
])
# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
])
mlflow.sklearn.autolog(disable=True)
X_val_processed = pipeline.fit_transform(X_val, y_val)
model
# COMMAND ----------
# Enable automatic logging of input samples, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silent=True)
with mlflow.start_run(run_name="xgboost") as mlflow_run:
model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)
# Training metrics are logged by MLflow autologging
# Log metrics for the validation set
xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")
# Log metrics for the test set
xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")
# Display the logged metrics
xgbc_val_metrics = {k.replace("val_", ""): v for k, v in xgbc_val_metrics.items()}
xgbc_test_metrics = {k.replace("test_", ""): v for k, v in xgbc_test_metrics.items()}
display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))
# COMMAND ----------
# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml
None
import xgboost
from mlflow.tracking import MlflowClient
xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
xgbc_model_env_str = open(xgbc_model_env_path)
xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)
xgbc_parsed_model_env_str["dependencies"][-1]["pip"].append(f"xgboost=={xgboost.__version__}")
with open(xgbc_model_env_path, "w") as f:
f.write(yaml.dump(xgbc_parsed_model_env_str))
xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
shutil.rmtree(xgbc_temp_dir)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the target variable.
# MAGIC - Generating SHAP feature importance is a very memory-intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overhead of each trial, a single example is sampled from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has any, both the background data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed samples may not match the actual data distribution.
# MAGIC
# MAGIC For more information on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).
# COMMAND ----------
# Set this flag to True and re-run the notebook to see the SHAP plots
shap_enabled = True
# COMMAND ----------
if shap_enabled:
from shap import KernelExplainer, summary_plot
# SHAP cannot explain models using data with nulls.
# To enable SHAP to succeed, both the background data and examples to explain are imputed with the mode (most frequent values).
mode = X_train.mode().iloc[0]
# Sample background data for SHAP Explainer. Increase the sample size to reduce variance.
train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode)
# Sample a single example from the validation set to explain. Increase the sample size and rerun for more thorough results.
example = X_val.sample(n=1).fillna(mode)
# Use Kernel SHAP to explain feature importance on the example from the validation set.
predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns))
explainer = KernelExplainer(predict, train_sample, link="logit")
shap_values = explainer.shap_values(example, l1_reg=False)
summary_plot(shap_values, example, class_names=model.classes_)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Inference
# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference.
# MAGIC
# MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below
# MAGIC
# MAGIC ### Register to Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)
# MAGIC ```
# MAGIC
# MAGIC ### Load from Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC model_version = registered_model_version.version
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}")
# MAGIC model.predict(input_X)
# MAGIC ```
# MAGIC
# MAGIC ### Load model without registering
# MAGIC ```
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri)
# MAGIC model.predict(input_X)
# MAGIC ```
# COMMAND ----------
# model_uri for the generated model
print(f"runs:/{ mlflow_run.info.run_id }/model")
# COMMAND ----------
# MAGIC %md
# MAGIC ### Loading model to make prediction
# COMMAND ----------
model_uri = "runs:/51c0348482e042ea8e4b7983ab6bff99/model"
model = mlflow.pyfunc.load_model(model_uri)
#model.predict(input_X)
# COMMAND ----------
import pandas as pd
data = {'author': {0: 'bigjim.com'},
'published': {0: '2016-10-27T18:05:26.351+03:00'},
'title': {0: 'aliens are coming to invade earth'},
'text': {0: 'aliens are coming to invade earth'},
'language': {0: 'english'},
'site_url': {0: 'cnn.com'},
'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},
'type': {0: 'bs'},
'title_without_stopwords': {0: 'aliens are coming to invade earth'},
'text_without_stopwords': {0: 'aliens are coming to invade earth'},
'hasImage': {0: 1.0}}
df = pd.DataFrame(data=data)
df.head()
# COMMAND ----------
model.predict(df)
# COMMAND ----------
| 36.753695 | 461 | 0.743399 |
1694a3aec6658351c14a81b2e91e92955b6cb8a7
| 341 |
py
|
Python
|
lucky_guess/__init__.py
|
mfinzi/lucky-guess-chemist
|
01898b733dc7d026f70d0cb6337309cb600502fb
|
[
"MIT"
] | null | null | null |
lucky_guess/__init__.py
|
mfinzi/lucky-guess-chemist
|
01898b733dc7d026f70d0cb6337309cb600502fb
|
[
"MIT"
] | null | null | null |
lucky_guess/__init__.py
|
mfinzi/lucky-guess-chemist
|
01898b733dc7d026f70d0cb6337309cb600502fb
|
[
"MIT"
] | null | null | null |
import importlib
import pkgutil
__all__ = []
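# Import every submodule of this package and re-export the names each one
# lists in __all__, so they are available directly at the package level.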
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
module = importlib.import_module('.'+module_name,package=__name__)
try:
globals().update({k: getattr(module, k) for k in module.__all__})
__all__ += module.__all__
except AttributeError: continue
| 34.1 | 73 | 0.71261 |
1695439f6b89942d55b135dae20f140a0772199c
| 3,727 |
py
|
Python
|
shuffling_algorithm.py
|
BaptisteLafoux/aztec_tiling
|
413acd8751b8178942e91fbee32987f02bc5c695
|
[
"MIT"
] | null | null | null |
shuffling_algorithm.py
|
BaptisteLafoux/aztec_tiling
|
413acd8751b8178942e91fbee32987f02bc5c695
|
[
"MIT"
] | null | null | null |
shuffling_algorithm.py
|
BaptisteLafoux/aztec_tiling
|
413acd8751b8178942e91fbee32987f02bc5c695
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:04:48 2020
@author: baptistelafoux
"""
import domino
import numpy as np
import numpy.lib.arraysetops as aso
| 26.81295 | 117 | 0.499866 |
1696d983057a2d937827a5a96f9b5500cb3c490c
| 478 |
py
|
Python
|
scripts/matrix_operations.py
|
h3ct0r/gas_mapping_example
|
57bd8333b4832281fbb89019df440374e2b50b9b
|
[
"Unlicense"
] | 1 |
2022-02-28T21:55:23.000Z
|
2022-02-28T21:55:23.000Z
|
scripts/matrix_operations.py
|
ArghyaChatterjee/gas_mapping_kerneldm
|
57bd8333b4832281fbb89019df440374e2b50b9b
|
[
"Unlicense"
] | null | null | null |
scripts/matrix_operations.py
|
ArghyaChatterjee/gas_mapping_kerneldm
|
57bd8333b4832281fbb89019df440374e2b50b9b
|
[
"Unlicense"
] | 2 |
2021-12-14T05:15:18.000Z
|
2022-02-28T21:55:10.000Z
|
import numpy as np
| 26.555556 | 67 | 0.725941 |
16988b5f9f77ebb40b1eb37bef67f48bd826786d
| 121 |
py
|
Python
|
ShanghaiPower/build_up.py
|
biljiang/pyprojects
|
10095c6b8f2f32831e8a36e122d1799f135dc5df
|
[
"MIT"
] | null | null | null |
ShanghaiPower/build_up.py
|
biljiang/pyprojects
|
10095c6b8f2f32831e8a36e122d1799f135dc5df
|
[
"MIT"
] | null | null | null |
ShanghaiPower/build_up.py
|
biljiang/pyprojects
|
10095c6b8f2f32831e8a36e122d1799f135dc5df
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize(["license_chk.py"]))
| 20.166667 | 50 | 0.793388 |
169a6a92aa8a5f8b13f2ca7a2bc5a3d4390e96a9
| 6,363 |
py
|
Python
|
quantum/plugins/nicira/extensions/nvp_qos.py
|
yamt/neutron
|
f94126739a48993efaf1d1439dcd3dadb0c69742
|
[
"Apache-2.0"
] | null | null | null |
quantum/plugins/nicira/extensions/nvp_qos.py
|
yamt/neutron
|
f94126739a48993efaf1d1439dcd3dadb0c69742
|
[
"Apache-2.0"
] | null | null | null |
quantum/plugins/nicira/extensions/nvp_qos.py
|
yamt/neutron
|
f94126739a48993efaf1d1439dcd3dadb0c69742
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
from abc import abstractmethod
from quantum.api import extensions
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import exceptions as qexception
from quantum import manager
# For policy.json/Auth
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"
def convert_to_unsigned_int_or_none(val):
if val is None:
return
try:
val = int(val)
if val < 0:
raise ValueError
except (ValueError, TypeError):
msg = _("'%s' must be a non negative integer.") % val
raise qexception.InvalidInput(error_message=msg)
return val
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'qos_queues': {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'default': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_boolean,
'is_visible': True, 'default': False},
'name': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'min': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'max': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'convert_to': convert_to_unsigned_int_or_none},
'qos_marking': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['untrusted', 'trusted']},
'default': 'untrusted', 'is_visible': True},
'dscp': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
},
}
QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
RXTX_FACTOR: {'allow_post': True,
'allow_put': False,
'is_visible': False,
'default': 1,
'convert_to': convert_to_unsigned_int_or_none},
QUEUE: {'allow_post': False,
'allow_put': False,
'is_visible': True,
'default': False}},
'networks': {QUEUE: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': False}}
}
class QueuePluginBase(object):
| 31.191176 | 78 | 0.610718 |
169b6898f6bda824a9456c155bd29a6f84fdb9e8
| 251 |
py
|
Python
|
easyneuron/math/__init__.py
|
TrendingTechnology/easyneuron
|
b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a
|
[
"Apache-2.0"
] | 1 |
2021-12-14T19:21:44.000Z
|
2021-12-14T19:21:44.000Z
|
easyneuron/math/__init__.py
|
TrendingTechnology/easyneuron
|
b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a
|
[
"Apache-2.0"
] | null | null | null |
easyneuron/math/__init__.py
|
TrendingTechnology/easyneuron
|
b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a
|
[
"Apache-2.0"
] | null | null | null |
"""easyneuron.math contains all of the maths tools that you'd ever need for your AI projects, when used alongside Numpy.
To suggest more to be added, please add an issue on the GitHub repo.
"""
from easyneuron.math.distance import euclidean_distance
| 41.833333 | 120 | 0.788845 |
169c6caecdf841a261ae5cbf1ce633a03edb8b3a
| 2,532 |
py
|
Python
|
tests/unit/concurrently/test_TaskPackageDropbox_put.py
|
shane-breeze/AlphaTwirl
|
59dbd5348af31d02e133d43fd5bfaad6b99a155e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/concurrently/test_TaskPackageDropbox_put.py
|
shane-breeze/AlphaTwirl
|
59dbd5348af31d02e133d43fd5bfaad6b99a155e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/concurrently/test_TaskPackageDropbox_put.py
|
shane-breeze/AlphaTwirl
|
59dbd5348af31d02e133d43fd5bfaad6b99a155e
|
[
"BSD-3-Clause"
] | null | null | null |
# Tai Sakuma <[email protected]>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.concurrently import TaskPackageDropbox
##__________________________________________________________________||
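# The fixtures used by the tests below are not shown in this snippet; the
# minimal sketches here are assumptions so the tests can run: MagicMock
# stand-ins for the working area and dispatcher, and a dropbox built from them.
@pytest.fixture()
def workingarea():
    return mock.MagicMock(name='workingarea')

@pytest.fixture()
def dispatcher():
    return mock.MagicMock(name='dispatcher')

@pytest.fixture()
def obj(workingarea, dispatcher):
    ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
    ret.open()
    yield ret
    ret.close()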
##__________________________________________________________________||
def test_repr(obj):
repr(obj)
def test_open_terminate_close(workingarea, dispatcher):
obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
assert 0 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 0 == dispatcher.terminate.call_count
obj.open()
assert 1 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 0 == dispatcher.terminate.call_count
obj.terminate()
assert 1 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 1 == dispatcher.terminate.call_count
obj.close()
assert 1 == workingarea.open.call_count
assert 1 == workingarea.close.call_count
assert 1 == dispatcher.terminate.call_count
def test_put(obj, workingarea, dispatcher):
workingarea.put_package.side_effect = [0, 1] # pkgidx
dispatcher.run.side_effect = [1001, 1002] # runid
package0 = mock.MagicMock(name='package0')
package1 = mock.MagicMock(name='package1')
assert 0 == obj.put(package0)
assert 1 == obj.put(package1)
assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list
def test_put_multiple(obj, workingarea, dispatcher):
workingarea.put_package.side_effect = [0, 1] # pkgidx
dispatcher.run_multiple.return_value = [1001, 1002] # runid
package0 = mock.MagicMock(name='package0')
package1 = mock.MagicMock(name='package1')
assert [0, 1] == obj.put_multiple([package0, package1])
assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list
##__________________________________________________________________||
| 30.878049 | 98 | 0.742496 |
169dfe6f123a1bb92dcedefda60fdcdf0dde5b42
| 3,497 |
py
|
Python
|
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base
from oslo_config import cfg
load_tests = testscenarios.load_tests_apply_scenarios
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
| 40.662791 | 78 | 0.637975 |
169ed0cf36c52beabffce88a57318686603b6c41
| 443 |
py
|
Python
|
users/migrations/0002_auto_20191113_1352.py
|
Dragonite/djangohat
|
68890703b1fc647785cf120ada281d6f3fcc4121
|
[
"MIT"
] | 2 |
2019-11-15T05:07:24.000Z
|
2019-11-15T10:27:48.000Z
|
users/migrations/0002_auto_20191113_1352.py
|
Dragonite/djangohat
|
68890703b1fc647785cf120ada281d6f3fcc4121
|
[
"MIT"
] | null | null | null |
users/migrations/0002_auto_20191113_1352.py
|
Dragonite/djangohat
|
68890703b1fc647785cf120ada281d6f3fcc4121
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-11-13 13:52
from django.db import migrations, models
| 23.315789 | 119 | 0.625282 |
16a0f5c79d486ed958f66a4f801398499c8d9ff1
| 3,389 |
py
|
Python
|
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | 839 |
2020-07-20T13:29:34.000Z
|
2022-03-31T21:09:16.000Z
|
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | 28 |
2020-08-07T09:23:58.000Z
|
2022-03-01T22:32:40.000Z
|
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | 79 |
2020-08-04T01:48:01.000Z
|
2022-03-27T13:30:54.000Z
|
from datetime import timedelta
from django.contrib.auth import get_user_model
from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework.views import APIView
from baserow.api.decorators import accept_timezone
from baserow.core.models import Group, Application
from baserow_premium.admin.dashboard.handler import AdminDashboardHandler
from .serializers import AdminDashboardSerializer
User = get_user_model()
| 38.078652 | 88 | 0.649159 |
16a20512bd62fea83ee40c49a4b7cc5fa386ce48
| 969 |
py
|
Python
|
src/clientOld.py
|
dan3612812/socketChatRoom
|
b0d548477687de2d9fd521826db9ea75e528de5c
|
[
"MIT"
] | null | null | null |
src/clientOld.py
|
dan3612812/socketChatRoom
|
b0d548477687de2d9fd521826db9ea75e528de5c
|
[
"MIT"
] | null | null | null |
src/clientOld.py
|
dan3612812/socketChatRoom
|
b0d548477687de2d9fd521826db9ea75e528de5c
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select
HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")
socketThread = threading.Thread(target=socketRecv)
socketThread.start()
# inputThread = Thread(target=inputJob)
# inputThread.start()
try:
while True:
data = input()
s.send(bytes(data, "utf-8"))
time.sleep(0.1)
except (KeyboardInterrupt, EOFError):
print("in except")
# s.close() #
socketThread.do_run = False
# socketThread.join()
# inputThread.join()
print("close thread")
sys.exit(0)
| 19.38 | 53 | 0.627451 |
16a205ccc4af00539940fcbe977b97f31972c365
| 6,296 |
py
|
Python
|
plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46 |
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386 |
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43 |
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
| 28.488688 | 215 | 0.513977 |
16a25b6b94677a9d90afcb9439df38171a1429af
| 25,083 |
py
|
Python
|
trove/tests/unittests/quota/test_quota.py
|
citrix-openstack-build/trove
|
52506396dd7bd095d1623d40cf2e67f2b478dc1d
|
[
"Apache-2.0"
] | null | null | null |
trove/tests/unittests/quota/test_quota.py
|
citrix-openstack-build/trove
|
52506396dd7bd095d1623d40cf2e67f2b478dc1d
|
[
"Apache-2.0"
] | null | null | null |
trove/tests/unittests/quota/test_quota.py
|
citrix-openstack-build/trove
|
52506396dd7bd095d1623d40cf2e67f2b478dc1d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from mockito import mock, when, unstub, any, verify, never, times
from mock import Mock
from trove.quota.quota import DbQuotaDriver
from trove.quota.models import Resource
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.common import exception
from trove.common import cfg
from trove.quota.quota import run_with_quotas
from trove.quota.quota import QUOTAS
"""
Unit tests for the classes and functions in DbQuotaDriver.py.
"""
CONF = cfg.CONF
resources = {
Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'),
Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),
}
FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"
| 42.513559 | 79 | 0.574812 |
16a2ce4183cf617439f69c8fd39f2dded2cf7d88
| 180 |
py
|
Python
|
analisador_sintatico/blueprints/api/parsers.py
|
viniciusandd/uri-analisador-sintatico
|
b347f4293e4c60bd3b2c838c8cef0d75db2c0bec
|
[
"MIT"
] | null | null | null |
analisador_sintatico/blueprints/api/parsers.py
|
viniciusandd/uri-analisador-sintatico
|
b347f4293e4c60bd3b2c838c8cef0d75db2c0bec
|
[
"MIT"
] | null | null | null |
analisador_sintatico/blueprints/api/parsers.py
|
viniciusandd/uri-analisador-sintatico
|
b347f4293e4c60bd3b2c838c8cef0d75db2c0bec
|
[
"MIT"
] | null | null | null |
from flask_restful import reqparse
| 25.714286 | 64 | 0.738889 |
16a3072f25578896e1189f9fac5976e0586e6b47
| 6,369 |
py
|
Python
|
demo_large_image.py
|
gunlyungyou/AerialDetection
|
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
|
[
"Apache-2.0"
] | 9 |
2020-10-08T19:51:17.000Z
|
2022-02-16T12:58:01.000Z
|
demo_large_image.py
|
gunlyungyou/AerialDetection
|
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
|
[
"Apache-2.0"
] | null | null | null |
demo_large_image.py
|
gunlyungyou/AerialDetection
|
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
|
[
"Apache-2.0"
] | 8 |
2020-09-25T14:47:55.000Z
|
2022-02-16T12:31:13.000Z
|
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
CLASS_NAMES_KR = (' ', ' ', ' ', ' ', ' ', '', '', '', '',
'', '', '', '', '', ' ')
CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train',
'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout')
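# Positional mapping from the Korean class labels to their English equivalents.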
CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)}
if __name__ == '__main__':
#roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py',
# r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth')
#roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py',
# r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth')
roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py',
r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth')
from glob import glob
roksis = glob('data/roksi2020/val/images/*.png')
#target = roksis[1]
#out = target.split('/')[-1][:-4]+'_out.jpg'
#roitransformer.inference_single_vis(target,
# os.path.join('demo', out),
# (512, 512),
# (1024, 1024))
for target in roksis[:100]:
out = target.split('/')[-1][:-4]+'_out.jpg'
print(os.path.join('demo/fasterrcnn', out))
roitransformer.inference_single_vis(target,
os.path.join('demo/fasterrcnn', out),
(512, 512),
(1024, 1024))
#roitransformer.inference_single_vis(r'demo/P0009.jpg',
# r'demo/P0009_out.jpg',
# (512, 512),
# (1024, 1024))
| 43.326531 | 122 | 0.551892 |
16a329d42e5fe4d870ae6840dac571c4c4bd741b
| 221 |
py
|
Python
|
ImageSearcher/admin.py
|
carpensa/dicom-harpooner
|
2d998c22c51e372fb9b5f3508c900af6f4405cd3
|
[
"BSD-3-Clause"
] | 1 |
2021-05-24T21:45:05.000Z
|
2021-05-24T21:45:05.000Z
|
ImageSearcher/admin.py
|
carpensa/dicom-harpooner
|
2d998c22c51e372fb9b5f3508c900af6f4405cd3
|
[
"BSD-3-Clause"
] | null | null | null |
ImageSearcher/admin.py
|
carpensa/dicom-harpooner
|
2d998c22c51e372fb9b5f3508c900af6f4405cd3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from dicoms.models import Subject
from dicoms.models import Session
from dicoms.models import Series
admin.site.register(Session)
admin.site.register(Subject)
admin.site.register(Series)
| 24.555556 | 33 | 0.837104 |
16a335de057546c0e95c5699aa9470bc30a7f928
| 334 |
py
|
Python
|
src/djangoreactredux/wsgi.py
|
noscripter/django-react-redux-jwt-base
|
078fb86005db106365df51fa11d8602fa432e3c3
|
[
"MIT"
] | 4 |
2016-07-03T08:18:45.000Z
|
2018-12-25T07:47:41.000Z
|
src/djangoreactredux/wsgi.py
|
noscripter/django-react-redux-jwt-base
|
078fb86005db106365df51fa11d8602fa432e3c3
|
[
"MIT"
] | 2 |
2021-03-20T00:02:08.000Z
|
2021-06-10T23:34:26.000Z
|
src/djangoreactredux/wsgi.py
|
noscripter/django-react-redux-jwt-base
|
078fb86005db106365df51fa11d8602fa432e3c3
|
[
"MIT"
] | 1 |
2019-08-02T14:51:41.000Z
|
2019-08-02T14:51:41.000Z
|
"""
WSGI config for django-react-redux-jwt-base project.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoreactredux.settings.dev")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| 23.857143 | 80 | 0.820359 |
16a40296272a4a2617c7e2666b6828a4cb958030
| 1,414 |
py
|
Python
|
simple_settings/dynamic_settings/base.py
|
matthewh/simple-settings
|
dbddf8d5be7096ee7c4c3cc6d82824befa9b714f
|
[
"MIT"
] | null | null | null |
simple_settings/dynamic_settings/base.py
|
matthewh/simple-settings
|
dbddf8d5be7096ee7c4c3cc6d82824befa9b714f
|
[
"MIT"
] | null | null | null |
simple_settings/dynamic_settings/base.py
|
matthewh/simple-settings
|
dbddf8d5be7096ee7c4c3cc6d82824befa9b714f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
from copy import deepcopy
import jsonpickle
| 28.28 | 68 | 0.609618 |
16a410bbf9dbba9b62a772c35376b67270885de8
| 3,981 |
py
|
Python
|
scripts/map_frame_to_utm_tf_publisher.py
|
coincar-sim/lanelet2_interface_ros
|
f1738766dd323ed64a4ebcc8254438920a587b80
|
[
"BSD-3-Clause"
] | 7 |
2019-03-27T03:59:50.000Z
|
2021-10-17T10:46:29.000Z
|
scripts/map_frame_to_utm_tf_publisher.py
|
coincar-sim/lanelet2_interface_ros
|
f1738766dd323ed64a4ebcc8254438920a587b80
|
[
"BSD-3-Clause"
] | 6 |
2019-04-13T15:55:55.000Z
|
2021-06-01T21:08:18.000Z
|
scripts/map_frame_to_utm_tf_publisher.py
|
coincar-sim/lanelet2_interface_ros
|
f1738766dd323ed64a4ebcc8254438920a587b80
|
[
"BSD-3-Clause"
] | 4 |
2021-03-25T09:22:55.000Z
|
2022-03-22T05:40:49.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2018
# FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de)
# KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import roslib
import rospy
import tf
import tf2_ros
import geometry_msgs.msg
import lanelet2
stb = None
static_transform = None
lat_origin = None
lon_origin = None
map_frame_id = None
actual_utm_with_no_offset_frame_id = None
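# NOTE: wait_for_params_successful() and timer_callback() are not shown in this
# snippet; the sketches below are assumptions so the node can run. The first
# reads the required parameters from the ROS parameter server (parameter names
# are assumed), the second re-stamps and re-broadcasts the constant map->UTM
# transform.
def wait_for_params_successful(timeout=10.0):
    global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id
    end = rospy.Time.now() + rospy.Duration(timeout)
    while rospy.Time.now() < end and not rospy.is_shutdown():
        try:
            lat_origin = float(rospy.get_param("~lat_origin"))
            lon_origin = float(rospy.get_param("~lon_origin"))
            map_frame_id = rospy.get_param("~map_frame_id")
            actual_utm_with_no_offset_frame_id = rospy.get_param(
                "~actual_utm_with_no_offset_frame_id")
            return True
        except KeyError:
            rospy.sleep(0.5)
    return False


def timer_callback(event):
    # Re-stamp and broadcast the (constant) transform once per timer tick.
    static_transform.header.stamp = rospy.Time.now()
    stb.sendTransform(static_transform)
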
if __name__ == '__main__':
rospy.init_node('map_frame_to_utm_tf_publisher')
if not wait_for_params_successful():
rospy.logerr("map_frame_to_utm_tf_publisher: Could not initialize")
exit()
origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin)
projector = lanelet2.projection.UtmProjector(
lanelet2.io.Origin(origin_latlon), False, False)
origin_xy = projector.forward(origin_latlon)
stb = tf2_ros.TransformBroadcaster()
static_transform = geometry_msgs.msg.TransformStamped()
static_transform.header.stamp = rospy.Time.now()
static_transform.header.frame_id = map_frame_id
static_transform.child_frame_id = actual_utm_with_no_offset_frame_id
static_transform.transform.translation.x = -origin_xy.x
static_transform.transform.translation.y = -origin_xy.y
static_transform.transform.translation.z = 0.0
q = tf.transformations.quaternion_from_euler(0, 0, 0)
static_transform.transform.rotation.x = q[0]
static_transform.transform.rotation.y = q[1]
static_transform.transform.rotation.z = q[2]
static_transform.transform.rotation.w = q[3]
rospy.Timer(rospy.Duration(1.), timer_callback)
rospy.spin()
| 38.278846 | 85 | 0.757096 |
16a5b8fdf510e7bdeb3c6bd8d9c144db7f897552
| 52 |
py
|
Python
|
lectures/05-python-intro/examples/argv.py
|
mattmiller899/biosys-analytics
|
ab24a4c7206ed9a865e896daa57cee3c4e62df1f
|
[
"MIT"
] | 14 |
2019-07-14T08:29:04.000Z
|
2022-03-07T06:33:26.000Z
|
lectures/05-python-intro/examples/argv.py
|
mattmiller899/biosys-analytics
|
ab24a4c7206ed9a865e896daa57cee3c4e62df1f
|
[
"MIT"
] | 4 |
2020-03-24T18:25:26.000Z
|
2021-08-23T20:44:07.000Z
|
lectures/05-python-intro/examples/argv.py
|
mattmiller899/biosys-analytics
|
ab24a4c7206ed9a865e896daa57cee3c4e62df1f
|
[
"MIT"
] | 33 |
2019-01-05T17:03:47.000Z
|
2019-11-11T20:48:24.000Z
|
#!/usr/bin/env python3
import sys
print(sys.argv)
| 8.666667 | 22 | 0.711538 |
16a6cc579db685a8a411c51c09771255b3e6c2c9
| 366 |
py
|
Python
|
tests/fixtures.py
|
easyas314159/cnftools
|
67896cf3d17587accfc5ad7e30730fea2394f558
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
easyas314159/cnftools
|
67896cf3d17587accfc5ad7e30730fea2394f558
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
easyas314159/cnftools
|
67896cf3d17587accfc5ad7e30730fea2394f558
|
[
"MIT"
] | null | null | null |
from itertools import chain
| 21.529412 | 52 | 0.734973 |
16a762cb2b4ddc4c0f253e56da58680346091ea8
| 7,879 |
py
|
Python
|
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py
|
Rodrigo-Flo/Kratos
|
f718cae5d1618e9c0e7ed1da9e95b7a853e62b1b
|
[
"BSD-4-Clause"
] | null | null | null |
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py
|
Rodrigo-Flo/Kratos
|
f718cae5d1618e9c0e7ed1da9e95b7a853e62b1b
|
[
"BSD-4-Clause"
] | null | null | null |
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py
|
Rodrigo-Flo/Kratos
|
f718cae5d1618e9c0e7ed1da9e95b7a853e62b1b
|
[
"BSD-4-Clause"
] | null | null | null |
# Import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.kratos_utilities as KratosUtilities
from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis
if __name__ == '__main__':
test = SodShockTubeTest()
test.setUp()
# test.testSodShockTubeExplicitASGS()
test.testSodShockTubeExplicitASGSShockCapturing()
# test.testSodShockTubeExplicitOSS()
# test.testSodShockTubeExplicitOSSShockCapturing()
test.runTest()
test.tearDown()
| 48.635802 | 142 | 0.615053 |
16a7758cb5092239aa048ae598f5849367159b11
| 647 |
py
|
Python
|
src/controllers/__init__.py
|
TonghanWang/NDQ
|
575f2e243bac1a567c072dbea8e093aaa4959511
|
[
"Apache-2.0"
] | 63 |
2020-02-23T09:37:15.000Z
|
2022-01-17T01:30:50.000Z
|
src/controllers/__init__.py
|
fringsoo/NDQ
|
e243ba917e331065e82c6634cb1d756873747be5
|
[
"Apache-2.0"
] | 14 |
2020-04-20T02:20:11.000Z
|
2022-03-12T00:16:33.000Z
|
src/controllers/__init__.py
|
mig-zh/NDQ
|
5720e3e8b529724e8d96a9a24c73bca24a11e7f9
|
[
"Apache-2.0"
] | 16 |
2020-03-12T02:57:52.000Z
|
2021-11-27T13:07:08.000Z
|
from .basic_controller import BasicMAC
from .cate_broadcast_comm_controller import CateBCommMAC
from .cate_broadcast_comm_controller_full import CateBCommFMAC
from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC
from .tar_comm_controller import TarCommMAC
from .cate_pruned_broadcast_comm_controller import CatePBCommMAC
REGISTRY = {"basic_mac": BasicMAC,
"cate_broadcast_comm_mac": CateBCommMAC,
"cate_broadcast_comm_mac_full": CateBCommFMAC,
"cate_broadcast_comm_mac_not_IB": CateBCommNIBMAC,
"tar_comm_mac": TarCommMAC,
"cate_pruned_broadcast_comm_mac": CatePBCommMAC}
| 46.214286 | 66 | 0.797527 |
16a89cacbc82dd93659b9a841883e22a139d8576
| 447 |
py
|
Python
|
main.py
|
1999foxes/run-cmd-from-websocket
|
0e2a080fe92b93c6cba63dfe5649ac2a3e745009
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
1999foxes/run-cmd-from-websocket
|
0e2a080fe92b93c6cba63dfe5649ac2a3e745009
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
1999foxes/run-cmd-from-websocket
|
0e2a080fe92b93c6cba63dfe5649ac2a3e745009
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import json
import logging
import websockets
logging.basicConfig()
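# NOTE: main() is not included in this snippet; a minimal sketch is assumed
# here: connect to a local server (hypothetical address), send one JSON-encoded
# command, and print the reply.
async def main():
    uri = "ws://localhost:8765"  # assumed server address
    async with websockets.connect(uri) as websocket:
        await websocket.send(json.dumps({"cmd": "echo hello"}))
        print(await websocket.recv())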
if __name__ == "__main__":
asyncio.run(main())
| 17.88 | 60 | 0.657718 |
16a8a652721deb01765dac84306cf8e790d8b09a
| 3,998 |
py
|
Python
|
3d_Vnet/3dvnet.py
|
GingerSpacetail/Brain-Tumor-Segmentation-and-Survival-Prediction-using-Deep-Neural-Networks
|
f627ce48e44bcc7d295ee1cf4086bfdfd7705d44
|
[
"MIT"
] | 100 |
2020-05-21T10:23:31.000Z
|
2022-03-26T18:26:38.000Z
|
3d_Vnet/3dvnet.py
|
GingerSpacetail/Brain-Tumor-Segmentation-and-Survival-Prediction-using-Deep-Neural-Networks
|
f627ce48e44bcc7d295ee1cf4086bfdfd7705d44
|
[
"MIT"
] | 3 |
2020-08-19T18:14:01.000Z
|
2021-01-04T09:53:07.000Z
|
3d_Vnet/3dvnet.py
|
GingerSpacetail/Brain-Tumor-Segmentation-and-Survival-Prediction-using-Deep-Neural-Networks
|
f627ce48e44bcc7d295ee1cf4086bfdfd7705d44
|
[
"MIT"
] | 25 |
2020-09-05T04:19:22.000Z
|
2022-02-09T19:30:29.000Z
|
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import tensorflow as tf
import keras.backend as K
from keras.utils import to_categorical
from keras import metrics
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum
from keras.layers.core import Lambda, RepeatVector, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D
from keras.layers.merge import concatenate, add
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from sklearn.utils import class_weight
from keras.callbacks import ModelCheckpoint
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import PReLU
import os
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
# from medpy.io import load
import numpy as np
#import cv2
import nibabel as nib
from PIL import Image
| 34.465517 | 118 | 0.693847 |
16a92c971c54838ec2fe27ba303cf0b8622f86ad
| 347 |
py
|
Python
|
vk/types/additional/active_offer.py
|
Inzilkin/vk.py
|
969f01e666c877c1761c3629a100768f93de27eb
|
[
"MIT"
] | 24 |
2019-09-13T15:30:09.000Z
|
2022-03-09T06:35:59.000Z
|
vk/types/additional/active_offer.py
|
Inzilkin/vk.py
|
969f01e666c877c1761c3629a100768f93de27eb
|
[
"MIT"
] | null | null | null |
vk/types/additional/active_offer.py
|
Inzilkin/vk.py
|
969f01e666c877c1761c3629a100768f93de27eb
|
[
"MIT"
] | 12 |
2019-09-13T15:30:31.000Z
|
2022-03-01T10:13:32.000Z
|
from ..base import BaseModel
# returned from https://vk.com/dev/account.getActiveOffers
| 21.6875 | 58 | 0.665706 |
16a9cd5f8c3947e5f770014cb07528f411173928
| 18,818 |
py
|
Python
|
lib/networks/Resnet50_train.py
|
yangxue0827/TF_Deformable_Net
|
00c86380fd2725ebe7ae22f41d460ffc0bca378d
|
[
"MIT"
] | 193 |
2017-07-19T14:29:38.000Z
|
2021-10-20T07:35:42.000Z
|
lib/networks/Resnet50_train.py
|
yangxue0827/TF_Deformable_Net
|
00c86380fd2725ebe7ae22f41d460ffc0bca378d
|
[
"MIT"
] | 29 |
2017-07-24T10:07:22.000Z
|
2020-01-03T20:38:36.000Z
|
lib/networks/Resnet50_train.py
|
Zardinality/TF_Deformable_Net
|
00c86380fd2725ebe7ae22f41d460ffc0bca378d
|
[
"MIT"
] | 67 |
2017-07-27T14:32:47.000Z
|
2021-12-27T13:10:37.000Z
|
# --------------------------------------------------------
# TFFRCNN - Resnet50
# Copyright (c) 2016
# Licensed under The MIT License [see LICENSE for details]
# Written by miraclebiu
# --------------------------------------------------------
import tensorflow as tf
from .network import Network
from ..fast_rcnn.config import cfg
| 58.080247 | 189 | 0.597619 |
16aafc257a8e2aae93d3cae037dc8cf239e63a42
| 20,180 |
py
|
Python
|
lib/aws_sso_lib/assignments.py
|
vdesjardins/aws-sso-util
|
bf092a21674e8286c4445df7f4aae8ad061444ca
|
[
"Apache-2.0"
] | 330 |
2020-11-11T15:53:22.000Z
|
2022-03-30T06:45:57.000Z
|
lib/aws_sso_lib/assignments.py
|
vdesjardins/aws-sso-util
|
bf092a21674e8286c4445df7f4aae8ad061444ca
|
[
"Apache-2.0"
] | 47 |
2020-11-11T01:32:29.000Z
|
2022-03-30T01:33:28.000Z
|
lib/aws_sso_lib/assignments.py
|
vdesjardins/aws-sso-util
|
bf092a21674e8286c4445df7f4aae8ad061444ca
|
[
"Apache-2.0"
] | 23 |
2020-11-25T14:12:37.000Z
|
2022-03-30T02:16:26.000Z
|
import re
import numbers
import collections
import logging
from collections.abc import Iterable
import itertools
import aws_error_utils
from .lookup import Ids, lookup_accounts_for_ou
from .format import format_account_id
LOGGER = logging.getLogger(__name__)
_Context = collections.namedtuple("_Context", [
"session",
"ids",
"principal",
"principal_filter",
"permission_set",
"permission_set_filter",
"target",
"target_filter",
"get_principal_names",
"get_permission_set_names",
"get_target_names",
"ou_recursive",
"cache",
"filter_cache"
])
def _get_single_target_iterator(target, context: _Context):
target_type = target[0]
if target_type == "AWS_ACCOUNT":
return _get_account_iterator(target, context)
elif target_type == "AWS_OU":
return _get_ou_iterator(target, context)
else:
raise TypeError(f"Invalid target type {target_type}")
def _get_permission_set_iterator(context: _Context):
if context.permission_set:
iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set]
return permission_set_iterator
else:
LOGGER.debug("Iterating for all permission sets")
return _get_all_permission_sets_iterator(context)
Assignment = collections.namedtuple("Assignment", [
"instance_arn",
"principal_type",
"principal_id",
"principal_name",
"permission_set_arn",
"permission_set_name",
"target_type",
"target_id",
"target_name",
])
def list_assignments(
session,
instance_arn=None,
identity_store_id=None,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
"""Iterate over AWS SSO assignments.
Args:
session (boto3.Session): boto3 session to use
instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances
identity_store_id (str): The identity store to use if principal names are being retrieved
or it will be looked up using ListInstances
principal: A principal specification or list of principal specifications.
A principal specification is a principal id or a 2-tuple of principal type and id.
principal_filter: A callable taking principal type, principal id, and principal name
(which may be None), and returning True if the principal should be included.
permission_set: A permission set arn or id, or a list of the same.
permission_set_filter: A callable taking permission set arn and name (name may be None),
returning True if the permission set should be included.
target: A target specification or list of target specifications.
A target specification is an account or OU id, or a 2-tuple of target type, which
is either AWS_ACCOUNT or AWS_OU, and target id.
target_filter: A callable taking target type, target id, and target name
(which may be None), and returning True if the target should be included.
get_principal_names (bool): Retrieve names for principals in assignments.
get_permission_set_names (bool): Retrieve names for permission sets in assignments.
get_target_names (bool): Retrieve names for targets in assignments.
ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts
including those in child OUs.
Returns:
An iterator over Assignment namedtuples
"""
ids = Ids(lambda: session, instance_arn, identity_store_id)
return _list_assignments(
session,
ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
)
if __name__ == "__main__":
import boto3
import sys
import json
logging.basicConfig(level=logging.INFO)
kwargs = {}
for v in sys.argv[1:]:
if hasattr(logging, v):
LOGGER.setLevel(getattr(logging, v))
else:
kwargs = json.loads(v)
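    # Assumed placeholder for the target filter used below (the original
    # definition is not shown in this snippet): log each candidate and keep it.
    def fil(target_type, target_id, target_name):
        LOGGER.info("target: %s %s %s", target_type, target_id, target_name)
        return True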
kwargs["target_filter"] = fil
try:
session = boto3.Session()
print(",".join(Assignment._fields))
for value in list_assignments(session, **kwargs):
print(",".join(v or "" for v in value))
except KeyboardInterrupt:
pass
| 41.608247 | 147 | 0.637413 |
16abab9c314c051765ffd991fb6c764e6cf24cb5
| 235 |
py
|
Python
|
solutions/pic_search/webserver/src/service/theardpool.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2020-03-10T07:43:08.000Z
|
2020-03-10T07:43:08.000Z
|
solutions/pic_search/webserver/src/service/theardpool.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
solutions/pic_search/webserver/src/service/theardpool.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2020-04-03T05:24:47.000Z
|
2020-04-03T05:24:47.000Z
|
import threading
from concurrent.futures import ThreadPoolExecutor
from service.train import do_train
| 26.111111 | 49 | 0.795745 |
16ac3137138a7e3b002c9c9337af2623d4ef26d0
| 2,600 |
py
|
Python
|
buildutil/main.py
|
TediCreations/buildutils
|
49a35e0926baf65f7688f89e53f525812540101c
|
[
"MIT"
] | null | null | null |
buildutil/main.py
|
TediCreations/buildutils
|
49a35e0926baf65f7688f89e53f525812540101c
|
[
"MIT"
] | null | null | null |
buildutil/main.py
|
TediCreations/buildutils
|
49a35e0926baf65f7688f89e53f525812540101c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import argparse
import subprocess
if __name__ == '__main__':
from version import __version__
from configParser import ConfigParser
else:
from .version import __version__
from .configParser import ConfigParser
def command(cmd):
"""Run a shell command"""
subprocess.call(cmd, shell=True)
"""
cmd_split = cmd.split()
process = subprocess.Popen(cmd_split,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = process.communicate()
return stdout, stderr
"""
if __name__ == '__main__':
main()
| 20.967742 | 65 | 0.672692 |
16ad65c0a3b3c48d1d5528a704a36242b69e1b30
| 590 |
py
|
Python
|
python/get_links.py
|
quiddity-wp/mediawiki-api-demos
|
98910dbd9c2cbbb13db790f3e8979419aeab34d4
|
[
"MIT"
] | 63 |
2019-05-19T13:22:37.000Z
|
2022-03-30T13:21:40.000Z
|
python/get_links.py
|
quiddity-wp/mediawiki-api-demos
|
98910dbd9c2cbbb13db790f3e8979419aeab34d4
|
[
"MIT"
] | 67 |
2019-05-03T17:17:19.000Z
|
2021-06-21T11:02:10.000Z
|
python/get_links.py
|
quiddity-wp/mediawiki-api-demos
|
98910dbd9c2cbbb13db790f3e8979419aeab34d4
|
[
"MIT"
] | 49 |
2019-02-19T09:28:33.000Z
|
2019-03-24T04:36:53.000Z
|
#This file is auto-generated. See modules.json and autogenerator.py for details
#!/usr/bin/python3
"""
get_links.py
MediaWiki API Demos
Demo of `Links` module: Get all links on the given page(s)
MIT License
"""
import requests
S = requests.Session()
URL = "https://en.wikipedia.org/w/api.php"
PARAMS = {
"action": "query",
"format": "json",
"titles": "Albert Einstein",
"prop": "links"
}
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
PAGES = DATA["query"]["pages"]
for k, v in PAGES.items():
for l in v["links"]:
print(l["title"])
| 16.857143 | 79 | 0.618644 |
16adc3c8486e2f9e557cbef70e8a437e66aeb740
| 19,267 |
py
|
Python
|
gautools/submit_gaussian.py
|
thompcinnamon/QM-calc-scripts
|
60b06e14b2efd307d419201079bb24152ab0bd3c
|
[
"Apache-2.0"
] | null | null | null |
gautools/submit_gaussian.py
|
thompcinnamon/QM-calc-scripts
|
60b06e14b2efd307d419201079bb24152ab0bd3c
|
[
"Apache-2.0"
] | 2 |
2018-07-18T19:53:08.000Z
|
2019-02-25T23:25:51.000Z
|
gautools/submit_gaussian.py
|
theavey/QM-calc-scripts
|
60b06e14b2efd307d419201079bb24152ab0bd3c
|
[
"Apache-2.0"
] | 1 |
2017-01-04T20:50:21.000Z
|
2017-01-04T20:50:21.000Z
|
#! /usr/bin/env python3
########################################################################
# #
# This script was written by Thomas Heavey in 2015. #
# [email protected] [email protected] #
# #
# Copyright 2015 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# This is written to work with python 3 because it should be good to
# be working on the newest version of python.
from __future__ import print_function
import argparse # For parsing commandline arguments
import datetime
import glob # Allows referencing file system/file names
import os
import re
import readline # Allows easier file input (with tab completion?)
import subprocess # Allows for submitting commands to the shell
from warnings import warn
from thtools import cd, make_obj_dir, save_obj, resolve_path
yes = ['y', 'yes', '1']
# An input function that can prefill in the text entry
# Not sure if this works in 3.5+ because raw_input is gone
def create_gau_input(coord_name, template, verbose=True):
"""
make gaussian input file by combining header and coordinates files
This function takes as input a file with a set of molecular
coordinates (the form should not matter, it will just be copied
into the next file) and a template file that should be the header
for the desired calculation (including charge and multiplicity),
returns the name of the file, and creates a Gaussian input file ending
with '.com'
:param str coord_name: name of file with coordinates in a format
Gaussian can read
:param str template: name of file with header for Gaussian calculation
(up to and including the charge and multiplicity)
:param bool verbose: If True, some status messages will be printed
(including file names)
:return: name of the written file
:rtype: str
"""
if verbose:
print('Creating Gaussian input file...')
_out_name = coord_name.rsplit('.', 1)[0] + '.com'
with open(_out_name, 'w') as out_file:
with open(template, 'r') as templ_file:
if verbose:
print('opened {}'.format(template))
for line in templ_file:
out_file.write(line)
if '\n' not in line:
out_file.write('\n')
with open(coord_name, 'r') as in_file:
if verbose:
print('opened {}'.format(coord_name))
for i, line in enumerate(in_file):
if i < 2:
# ignore first two lines
# number of atoms and the title/comment
continue
# if line.strip().isdigit():
# # the first line is the number of atoms
# continue
# # XYZ files created by mathematica have a comment
# # as the second line saying something like:
# # "Created by mathematica". Obv. want to ignore that
# if line.strip().startswith('Create') or
# line.strip().startswith('generated'):
# continue
# else:
out_file.write(line)
out_file.write('\n\n\n')
if verbose:
print('created Gaussian input file {}'.format(_out_name))
return _out_name
def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False,
mem='125', executable='g09',
chk_file=None, copy_chk=False,
ln_running=None,
hold_jid=None, xyz=None, make_xyz=None, make_input=False,
ugt_dict=None):
"""
Write submission script for (Gaussian) jobs for submission to queue
If make_xyz is not None, the file make_xyz will be checked to exist
first to make sure to not waste time when missing a necessary input file.
:param str input_name: Name of the file to use as input
:param int num_cores: Number of cores to request
:param str time: Amount of time to request in the format 'hh:mm:ss'
:param bool verbose: If True, print out some status messages and such
:type mem: int or str
:param mem: Minimum amount of memory to request
:param str executable: Executable file to use for the job
Example, 'g09', 'g16'
:param str chk_file: If not None, this file will be copied back after the
job has completed. If this is not None and make_input is True,
this will also be passed to use_gen_template.
:param bool copy_chk: If this is True, the script will attempt to copy
what should be an existing checkpoint file to the scratch directory
before running the job. `chk_file` must be not None as well.
:param str ln_running: If not None, this will be the base name for
linking the output file to the current directory. If chk_file is not
None, it will also be linked with the same base name.
:param str hold_jid: Job on which this job should depend.
This should be the name of another job in the queuing system.
:param str xyz: Name of an xyz file to use as input to use_gen_template
(if make_input is True).
:param str make_xyz: The name of a file to pass to obabel to be used to
create an xyz file to pass to use_gen_template.
:param bool make_input: If True, use_gen_template will be used to create
input for the Gaussian calculation.
:param dict ugt_dict: dict of arguments to pass to use_gen_template.
This should not include out_file, xyz, nproc, mem, or checkpoint
because those will all be used from other arguments to this function.
out_file will be input_name; xyz will be xyz or a time-based name if
make_xyz is not None; nproc will be $NSLOTS (useful if this gets
changed after job submission); mem will be mem; and checkpoint will
be chk_file.
:return: The name of the script file
:rtype: str
"""
rel_dir, file_name = os.path.split(input_name)
if file_name.endswith('.com'):
short_name = os.path.splitext(file_name)[0]
if not short_name + '.com' == file_name:
raise SyntaxError('problem interpreting file name. ' +
'Period in file name?')
out_name = short_name + '.out'
elif '.' in file_name:
short_name, input_extension = os.path.splitext(file_name)
if not short_name + '.' + input_extension == file_name:
raise SyntaxError('problem interpreting file name. ' +
'Period in file name?')
out_name = short_name + '.out'
else:
short_name = file_name
file_name = short_name + '.com'
print('Assuming input file is {}'.format(file_name))
out_name = short_name + '.out'
job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1)
if len(job_name) == 0:
job_name = 'default'
_script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh')
temp_xyz = os.path.abspath('.temp' +
datetime.datetime.now().strftime('%H%M%S%f') +
'.xyz')
if xyz is None or make_xyz is not None:
n_xyz = temp_xyz
else:
n_xyz = resolve_path(xyz)
temp_pkl = temp_xyz[:-4]
if ugt_dict is not None:
make_obj_dir()
pkl_path = save_obj(ugt_dict, temp_pkl)
if chk_file is not None:
chk_line = 'checkpoint=\'{}\','.format(chk_file)
else:
chk_line = ''
with open(_script_name, 'w') as script_file:
sfw = script_file.write
sfw('#!/bin/bash -l\n\n')
sfw('#$ -pe omp {}\n'.format(num_cores))
sfw('#$ -M [email protected]\n')
sfw('#$ -m eas\n')
sfw('#$ -l h_rt={}\n'.format(time))
sfw('#$ -l mem_total={}G\n'.format(mem))
sfw('#$ -N {}\n'.format(job_name))
sfw('#$ -j y\n')
sfw('#$ -o {}.log\n\n'.format(short_name))
if hold_jid is not None:
sfw('#$ -hold_jid {}\n\n'.format(hold_jid))
if make_xyz is not None:
sfw('if [ ! -f {} ]; then\n'.format(
os.path.abspath(make_xyz)) +
' exit 17\n'
'fi\n\n')
sfw('module load wxwidgets/3.0.2\n')
sfw('module load openbabel/2.4.1\n\n')
sfw('obabel {} -O {}\n\n'.format(os.path.abspath(
make_xyz), os.path.abspath(n_xyz)))
if make_input:
sfw('python -c "from gautools.tools import '
'use_gen_template as ugt;\n'
'from thtools import load_obj, get_node_mem;\n'
'm = get_node_mem();\n'
'd = load_obj(\'{}\');\n'.format(
os.path.abspath(pkl_path)) +
'ugt(\'{}\',\'{}\','.format(
file_name, os.path.abspath(n_xyz)) +
'nproc=$NSLOTS,mem=m,{}'.format(chk_line) +
'**d)"\n\n')
sfw('INPUTFILE={}\n'.format(file_name))
sfw('OUTPUTFILE={}\n'.format(out_name))
if chk_file is not None:
sfw('CHECKFILE={}\n\n'.format(chk_file))
else:
sfw('\n')
if ln_running is not None:
sfw('WORKINGOUT={}.out\n'.format(ln_running))
if chk_file is not None:
sfw('WORKINGCHK={}.chk\n\n'.format(ln_running))
else:
sfw('\n')
sfw('CURRENTDIR=`pwd`\n')
sfw('SCRATCHDIR=/scratch/$USER\n')
sfw('mkdir -p $SCRATCHDIR\n\n')
sfw('cd $SCRATCHDIR\n\n')
sfw('cp $CURRENTDIR/$INPUTFILE .\n')
if chk_file is not None:
sfw('# ') if not copy_chk else None
sfw('cp $CURRENTDIR/$CHECKFILE .\n\n')
else:
sfw('\n')
if ln_running is not None:
sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE '
'$CURRENTDIR/$WORKINGOUT\n')
if chk_file is not None:
sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE '
'$CURRENTDIR/$WORKINGCHK\n\n')
else:
sfw('\n')
sfw('echo About to run {} in /net/`'.format(executable) +
'hostname -s`$SCRATCHDIR\n\n')
sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable))
sfw('\n\n')
if ln_running is not None:
sfw('rm $CURRENTDIR/$WORKINGOUT')
if chk_file is not None:
sfw(' $CURRENTDIR/$WORKINGCHK\n\n')
else:
sfw('\n\n')
sfw('cp $OUTPUTFILE $CURRENTDIR/.\n')
if chk_file is not None:
sfw('cp $CHECKFILE $CURRENTDIR/.\n\n')
else:
sfw('\n')
sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\n')
sfw('echo output was copied to $CURRENTDIR\n\n')
if verbose:
print('script written to {}'.format(_script_name))
return _script_name
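# NOTE: get_input_files, use_template and submit_scripts are not shown in this
# snippet; the minimal sketches below are assumptions so the command-line entry
# point can run. Signatures follow how they are called under __main__.
def get_input_files(in_name, batch):
    """Expand a (possibly glob) input name; multiple matches imply a batch job."""
    names = sorted(glob.glob(in_name)) or [in_name]
    return names, (batch or len(names) > 1)


def use_template(template, in_names, verbose):
    """Create Gaussian .com inputs from coordinate files using a header template."""
    return [create_gau_input(name, template, verbose=verbose) for name in in_names]


def submit_scripts(script_list, batch, submit, verbose):
    """qsub each script (when requested) and return the queuing-system replies."""
    job_info = []
    if not submit:
        return job_info
    for script in script_list:
        rel_dir, script_name = os.path.split(script)
        output = subprocess.check_output(['qsub', script_name], cwd=rel_dir or '.')
        output = output.decode().strip()
        if verbose:
            print(output)
        job_info.append(output)
    return job_info
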
if __name__ == '__main__':
description = 'Create and submit a script to run a Gaussian job on SCC'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('in_name',
help='Name of Gaussian input file')
parser.add_argument('-c', '--numcores', type=int, default=16,
help='Number of cores for job')
# I should probably check validity of this time request
# Maybe it doesn't matter so much because it just won't
# submit the job and it will give quick feedback about that?
parser.add_argument('-t', '--time',
help='Time required as "hh:mm:ss"',
default='12:00:00')
parser.add_argument('-e', '--executable', type=str, default='g09',
help='name of executable to run')
parser.add_argument('-b', '--batch', action='store_true',
help='create multiple scripts (batch job)')
parser.add_argument('-x', '--template', default=None,
help='template file for creating input from coords')
parser.add_argument('-s', '--submit', action='store_true',
help='Automatically submit jobs?')
parser.add_argument('-v', '--verbose', action='store_true',
help='make program more verbose')
parser.add_argument('-j', '--nojobinfo', action='store_false',
help='Do not return the submitted job information')
parser.add_argument('-k', '--chk_file', default=None,
help='checkpoint file to be written and copied back')
parser.add_argument('--copy_chk', action='store_true',
help='Copy check file to the scratch directory')
parser.add_argument('-l', '--ln_running', type=str, default=None,
help='base name for linking output to cwd while '
'running')
parser.add_argument('-d', '--hold_jid', default=None,
help='job on which this job should depend')
args = parser.parse_args()
in_name_list, args.batch = get_input_files(args.in_name, args.batch)
if args.template:
in_name_list = use_template(args.template, in_name_list, args.verbose)
script_list = []
for in_name in in_name_list:
script_name = write_sub_script(input_name=in_name,
num_cores=args.numcores,
time=args.time,
verbose=args.verbose,
executable=args.executable,
chk_file=args.chk_file,
copy_chk=args.copy_chk,
ln_running=args.ln_running,
hold_jid=args.hold_jid)
script_list.append(script_name)
    if len(script_list) != len(in_name_list):
        # This should never be the case as far as I know, but make sure every
        # input file got a script and that all the script names are there to
        # be submitted.
        raise IOError('number of scripts differs from number of input names given')
job_info = submit_scripts(script_list, args.batch, args.submit,
args.verbose)
if job_info and args.nojobinfo:
for job in job_info:
print(job)
if args.verbose:
print('Done. Completed normally.')
avg_line_length: 44.496536 | max_line_length: 78 | alphanum_fraction: 0.553537

hexsha: 16aff0c4c406b2f10dac6cda72a39c612f61400e | size: 2,036 | ext: py | lang: Python
path: experiments/recorder.py | repo: WeiChengTseng/maddpg | head: f2813ab8bc43e2acbcc69818672e2e2fd305a007 | licenses: ["MIT"]
stars: 3 (2022-01-04 to 2022-01-11) | issues: null | forks: null
import json
import copy
import pdb
import numpy as np
import pickle
avg_line_length: 27.513514 | max_line_length: 66 | alphanum_fraction: 0.515717

hexsha: 16b0c13e303ebbec34fd3a80391f02025c584689 | size: 589 | ext: py | lang: Python
path: generate/dummy_data/mvp/gen_csv.py | repo: ifekxp/data | head: f3571223f51b3fcc3a708d9ac82e76e3cc1ee068 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from faker import Faker
import csv
# Reference: https://pypi.org/project/Faker/
output = open('data.CSV', 'w', newline='')
fake = Faker()
header = ['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat']
mywriter=csv.writer(output)
mywriter.writerow(header)
for r in range(1000):
mywriter.writerow([
fake.name(),
fake.random_int(min=18, max=80, step=1),
fake.street_address(),
fake.city(),
fake.state(),
fake.zipcode(),
fake.longitude(),
fake.latitude()
])
output.close()
avg_line_length: 21.814815 | max_line_length: 73 | alphanum_fraction: 0.556876

hexsha: 16b0eceb3e8aafd2e9b6e9e274abab88018c34aa | size: 495 | ext: py | lang: Python
path: subir/ingreso/migrations/0004_auto_20191003_1509.py | repo: Brandon1625/subir | head: b827a30e64219fdc9de07689d2fb32e2c4bd02b7 | licenses: ["bzip2-1.0.6"]
stars: null | issues: null | forks: null
# Generated by Django 2.2.4 on 2019-10-03 21:09
from django.db import migrations, models
import django.db.models.deletion
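
# The Migration class itself is not included in this excerpt. The sketch below
# is a hypothetical reconstruction of what a Django auto-generated migration of
# this shape typically contains; the app, model, and field names here are
# illustrative assumptions, not the contents of the original file.
class Migration(migrations.Migration):

    dependencies = [
        ('ingreso', '0003_auto_previous'),  # assumed name of the preceding migration
    ]

    operations = [
        migrations.AlterField(
            model_name='registro',   # assumed model name
            name='usuario',          # assumed field name
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to='ingreso.Usuario',  # assumed related model
            ),
        ),
    ]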
avg_line_length: 24.75 | max_line_length: 116 | alphanum_fraction: 0.650505

hexsha: 16b1afada94a1ed1f6f7ce90f2dda1d6203c70b0 | size: 1,302 | ext: py | lang: Python
path: pyscf/nao/test/test_0017_tddft_iter_nao.py | repo: mfkasim1/pyscf | head: 7be5e015b2b40181755c71d888449db936604660 | licenses: ["Apache-2.0"]
stars: 3 (2021-02-28 to 2021-03-01) | issues: 36 (2018-08-22 to 2020-05-09) | forks: 4 (2018-02-14 to 2019-08-12)
from __future__ import print_function, division
import os,unittest
from pyscf.nao import tddft_iter
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname)
try:
from pyscf.lib import misc
libnao_gpu = misc.load_library("libnao_gpu")
td_gpu = tddft_iter(label='water', cd=dname, GPU=True)
except:
td_gpu = None
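
# The actual test case is not part of this excerpt. The class below is a
# hedged, minimal smoke test added for illustration only: it checks that the
# module-level objects above were set up, and is not the original pyscf test.
class KnowValues(unittest.TestCase):

    def test_tddft_iter_constructed(self):
        """The CPU-side tddft_iter object should exist after module setup."""
        self.assertIsNotNone(td)

    def test_reference_dir_exists(self):
        """The directory holding the 'water' reference data should be valid."""
        self.assertTrue(os.path.isdir(dname))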
if __name__ == "__main__": unittest.main()
avg_line_length: 33.384615 | max_line_length: 107 | alphanum_fraction: 0.678955

hexsha: 16b1e0777507d0977f5c8842b27867dc734bcc90 | size: 898 | ext: py | lang: Python
path: setup.py | repo: dimasciput/osm2geojson | head: 7b5ba25e39d80838d41f342237161e0fdc5e64b6 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import io
from os import path
from setuptools import setup
dirname = path.abspath(path.dirname(__file__))
with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
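
# setup() below calls parse_requirements(), which is not defined in this
# excerpt. The helper here is a hedged guess at what the original provides:
# it reads one requirement per line, skipping blanks and comments.
def parse_requirements(filename):
    with io.open(path.join(dirname, filename), encoding='utf-8') as req_file:
        return [line.strip() for line in req_file
                if line.strip() and not line.startswith('#')]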
setup(
name='osm2geojson',
version='0.1.27',
license='MIT',
description='Parse OSM and Overpass JSON',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='geometry gis osm parsing',
author='Parfeniuk Mykola',
author_email='[email protected]',
url='https://github.com/aspectumapp/osm2geojson',
packages=['osm2geojson'],
include_package_data=True,
install_requires=parse_requirements("requirements.txt")
)
avg_line_length: 32.071429 | max_line_length: 73 | alphanum_fraction: 0.722717

hexsha: 16b268fae933e4415a5583a098a6d7daa28d2e18 | size: 849 | ext: py | lang: Python
path: Cap_11/ex11.6.py | repo: gguilherme42/Livro-de-Python | head: 465a509d50476fd1a87239c71ed741639d58418b | licenses: ["MIT"]
stars: 4 (2020-04-07 to 2022-03-10) | issues: null | forks: 1 (2021-04-22)
import sqlite3
from contextlib import closing
nome = input('Nome do produto: ').lower().capitalize()
with sqlite3.connect('precos.db') as conexao:
with closing(conexao.cursor()) as cursor:
cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,))
registro = cursor.fetchone()
if not(registro is None):
            print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}')
valor = float(input('Novo valor: R$'))
cursor.execute('UPDATE Precos SET preco = ? WHERE nome_produto = ?', (valor, registro[0]))
if cursor.rowcount == 1:
conexao.commit()
                print('Alteração gravada.')
else:
conexao.rollback()
                print('Alteração abortada.')
else:
            print(f'Produto {nome} não encontrado.')
avg_line_length: 38.590909 | max_line_length: 102 | alphanum_fraction: 0.572438

hexsha: 16b631fdc9b05e860febb665678ebc3703e11591 | size: 4,882 | ext: py | lang: Python
path: jet20/backend/solver.py | repo: JTJL/jet20 | head: 2dc01ebf937f8501bcfb15c6641c569f8097ccf5 | licenses: ["MIT"]
stars: 1 (2020-07-13) | issues: null | forks: null
import torch
import time
import copy
from jet20.backend.constraints import *
from jet20.backend.obj import *
from jet20.backend.config import *
from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED
import logging
logger = logging.getLogger(__name__)
avg_line_length: 26.247312 | max_line_length: 102 | alphanum_fraction: 0.546907

hexsha: 16b8038b17e6b43264d1acbee80a12ded5b8d440 | size: 1,077 | ext: py | lang: Python
path: tests/test_transforms.py | repo: mengfu188/mmdetection.bak | head: 0bc0ea591b5725468f83f9f48630a1e3ad599303 | licenses: ["Apache-2.0"]
stars: 2 (2020-07-14 to 2021-05-07) | issues: null | forks: null
import torch
from mmdet.datasets.pipelines.transforms import Pad
from mmdet.datasets.pipelines.transforms import FilterBox
import numpy as np
import cv2
if __name__ == '__main__':
# test_pad()
test_filter_box()
avg_line_length: 22.914894 | max_line_length: 57 | alphanum_fraction: 0.535747

hexsha: 16b8947aeb5e92484b74a59f50dce7a8d1075f22 | size: 23,601 | ext: py | lang: Python
path: dev/Tools/build/waf-1.7.13/lmbrwaflib/unit_test_lumberyard_modules.py | licenses: ["AML"]
max_stars repo: akulamartin/lumberyard (head 2d4be458a02845179be098e40cdc0c48f28f3b5a), stars: 8 (2019-10-07 to 2020-12-07)
max_issues/max_forks repo: 29e7e280-0d1c-4bba-98fe-f7cd3ca7500a/lumberyard (head 1c52b941dcb7d94341fcf21275fe71ff67173ada), issues: null, forks: 4 (2019-08-05 to 2020-12-07)
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from waflib import Errors
import lumberyard_modules
import unittest
import pytest
import utils
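
# The FakeContext, FakePlatformSettings, FakeConfigurationSettings,
# FakeConfiguration helpers and the _mock_* callbacks used by the tests below
# are defined elsewhere in the original module and are not part of this
# excerpt. The data-holder sketches here are hedged assumptions about their
# shape, inferred only from how the tests construct them.
class FakePlatformSettings(object):
    def __init__(self, platform_name, aliases=None):
        self.platform = platform_name   # attribute names are assumptions
        self.aliases = aliases or set()


class FakeConfigurationSettings(object):
    def __init__(self, settings_name, base_config=None):
        self.name = settings_name
        self.base_config = base_config


class FakeConfiguration(object):
    def __init__(self, settings, is_test=False, is_server=False):
        self.settings = settings
        self.is_test = is_test
        self.is_server = is_server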
def test_SanitizeKWInput_SimpleKwDictionary_Success():
kw = dict(
libpath='mylib'
)
lumberyard_modules.sanitize_kw_input(kw)
assert isinstance(kw['libpath'], list)
assert kw['libpath'][0] == 'mylib'
def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success():
kw = dict(
libpath='mylib',
additional_settings=dict(stlibpath='mystlib')
)
lumberyard_modules.sanitize_kw_input(kw)
assert isinstance(kw['libpath'], list)
assert kw['libpath'][0] == 'mylib'
assert isinstance(kw['additional_settings'], list)
assert isinstance(kw['additional_settings'][0], dict)
assert isinstance(kw['additional_settings'][0]['stlibpath'], list)
assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib'
class ProjectSettingsTest(unittest.TestCase):
def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self):
"""
Test scenario:
Setup a project settings that contains other project settings, so that it can recursively call merge_kw_dict
recursively
"""
include_settings_file = 'include_test'
test_settings_single_include = {'includes': [include_settings_file]}
test_empty_settings = {}
test_merge_kw_key = 'passed'
test_merge_kw_value = True
self.mock_json_map = {'path': test_settings_single_include,
include_settings_file: test_empty_settings}
# Prepare a mock include settings object
test_include_settings = self.createSimpleSettings()
test_include_settings.merge_kw_dict = _mock_merge_kw_dict
# Prepare a mock context
fake_context = FakeContext()
fake_context.get_project_settings_file = _mock_get_project_settings_file
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_settings_single_include)
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=None,
configuration=None)
self.assertIn(test_merge_kw_key, test_merge_kw)
self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value)
def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when only platform is set and not any configurations
"""
test_platform = 'test_platform'
test_alias = 'alias_1'
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform',
aliases={test_alias})
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform,
configuration=None)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform), sections_merged)
self.assertIn('{}/*'.format(test_alias), sections_merged)
self.assertEqual(len(sections_merged), 2)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is not a test nor
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name))
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 2)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is not a test nor
server configuration, but is derived from another configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
base_test_configuration_name = 'base_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name,
base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name))))
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 3)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test and a
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=True,
is_server=True)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 8)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test but not a
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=True,
is_server=False)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test', sections_merged)
self.assertIn('{}/*/test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 11)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a server but not a
test configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=False,
is_server=True)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated', sections_merged)
self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 11)
avg_line_length: 43.304587 | max_line_length: 191 | alphanum_fraction: 0.637897

hexsha: 16ba68b504461ec3bb45c6f18a8ccf9704c15e7e | size: 7,471 | ext: py | lang: Python
path: linprog_curvefit.py | repo: drofp/linprog_curvefit | head: 96ba704edae7cea42d768d7cc6d4036da2ba313a | licenses: ["Apache-2.0"]
stars: null | issues: 3 (2019-11-22 to 2019-11-26) | forks: null
#!/usr/bin/env python3
"""Curve fitting with linear programming.
Minimizes the sum of error for each fit point to find the optimal coefficients
for a given polynomial.
Overview:
Objective: Sum of errors
Subject to: Bounds on coefficients
Credit: "Curve Fitting with Linear Programming", H. Swanson and R. E. D. Woolsey
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import string
from ortools.linear_solver import pywraplp
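
# ErrorDefinition and _generate_constraints are referenced below but are
# missing from this excerpt. These are hedged reconstructions: the enum member
# name matches its usage below, and the constraint builder encodes
# fit(x) + e_plus - e_minus = y for each fit point, which is one standard way
# to linearize the absolute error; the original may differ in details.
class ErrorDefinition(enum.Enum):
    SUM_ABS_DEV = 1


def _generate_constraints(solver, points, num_of_coeff, variables):
    """Tie each point's polynomial value and error variables to its y value."""
    coeffs = variables[:num_of_coeff]
    err_vars = variables[num_of_coeff:]
    for point_num, (x, y) in enumerate(points):
        constraint = solver.Constraint(y, y)
        for idx, coeff_var in enumerate(coeffs):
            # Highest-order coefficient first, per the docstring below.
            constraint.SetCoefficient(coeff_var, float(x) ** (num_of_coeff - 1 - idx))
        constraint.SetCoefficient(err_vars[2 * point_num], 1)       # e_plus
        constraint.SetCoefficient(err_vars[2 * point_num + 1], -1)  # e_minus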
def _generate_variables(solver, points, coeff_ranges, err_max, error_def):
"""Create coefficient variables.
Initial version works for up to 26 variable polynomial. One letter per
english alphabet used for coefficient names.
TODO(drofp): Figure out naming scheme for arbitrary number of variables.
"""
num_of_coeff = len(coeff_ranges)
variables = []
coeff_names = []
# Add coefficients to variable list.
if num_of_coeff == 2:
coeff_names.append('m')
coeff_names.append('b')
else:
for letter_cnt in range(num_of_coeff):
coeff_names.append(string.ascii_lowercase[letter_cnt])
for coeff_num in range(num_of_coeff):
if coeff_ranges[coeff_num][0] is None:
lower_bound = -solver.Infinity()
else:
lower_bound = coeff_ranges[coeff_num][0]
if coeff_ranges[coeff_num][1] is None:
upper_bound = solver.Infinity()
else:
upper_bound = coeff_ranges[coeff_num][1]
variables.append(
solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num]))
# Add absolute error variables to variable list
for point_cnt in range(len(points)):
positive_err_var = solver.NumVar(
0, err_max, 'e' + str(point_cnt + 1) + '_plus')
negative_err_var = solver.NumVar(
0, err_max, 'e' + str(point_cnt + 1) + '_minus')
variables.append(positive_err_var)
variables.append(negative_err_var)
return variables
def _generate_objective_fn(
solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV):
"""Generate objective function for given error definition."""
objective = solver.Objective()
for variable in variables[num_of_coeff:]:
objective.SetCoefficient(variable, 1)
return objective
def get_optimal_polynomial(
points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV,
err_max=10000, solver=None):
"""Optimize coefficients for any order polynomial.
Args:
points: A tuple of points, represented as tuples (x, y)
        coeff_ranges: A tuple of valid coefficient ranges, represented as tuples
            (min, max). Number of elements in list determines order of polynomial,
from highest order (0th index) to lowest order (nth index).
err_def: An ErrorDefinition enum, specifying the definition for error.
err_max: An Integer, specifying the maximum error allowable.
solver: a ortools.pywraplp.Solver object, if a specific solver instance is
requested by caller.
Returns:
A Dictionary, the desired coefficients mapped to ther values.
"""
if coeff_ranges is None:
raise ValueError('Please provide appropriate coefficient range.')
if solver is None:
solver = pywraplp.Solver(
'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
variables = _generate_variables(
solver, points, coeff_ranges, err_max=err_max,
error_def=error_def)
num_of_coeff = len(coeff_ranges)
_generate_objective_fn(solver, num_of_coeff, variables)
_generate_constraints(solver, points, num_of_coeff, variables)
solver.Solve()
var_to_val = dict()
for coeff in variables[:num_of_coeff]:
var_to_val[coeff.name()] = coeff.solution_value()
return var_to_val
def demo_optimal_linear_5points():
"""Demonstration of getting optimal linear polynomial.
Uses 5 points from Swanson's curve fitting paper.
"""
print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER')
points = (0,1), (1,3), (2,2), (3,4), (4,5)
coeff_ranges = ((None, None), (None, None))
# solver = pywraplp.Solver(
# 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
optimized_coefficients = get_optimal_polynomial(
points=points, coeff_ranges=coeff_ranges)
for elm in optimized_coefficients:
print('elm: {}'.format(elm))
print(
'type(optimized_coefficients): {}'.format(
type(optimized_coefficients)))
print('optimized_coefficients: {}'.format(optimized_coefficients))
# m, b = optimized_coefficients
# print('Optimized m: {}, b: {}'.format(m, b))
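

# main() is invoked below but its definition is not part of this excerpt; the
# stand-in here is an assumption that simply runs the demo defined above.
def main():
    demo_optimal_linear_5points()
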
if __name__ == '__main__':
main()
avg_line_length: 39.115183 | max_line_length: 80 | alphanum_fraction: 0.674073

hexsha: 16bce26f2376d0aa7170df9f650a479bf160647c | size: 11,177 | ext: py | lang: Python
path: build-script-helper.py | repo: aciidb0mb3r/swift-stress-tester | head: aad9df89d2aae4640e9f4e06c234818c6b3ed434 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
"""
This source file is part of the Swift.org open source project
Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
Licensed under Apache License v2.0 with Runtime Library Exception
See https://swift.org/LICENSE.txt for license information
See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
------------------------------------------------------------------------------
This is a helper script for the main swift repository's build-script.py that
knows how to build and install the stress tester utilities given a swift
workspace.
"""
from __future__ import print_function
import argparse
import sys
import os, platform
import subprocess
# Returns true if any of the actions in `action_names` should be run.
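
# The function described by the comment above (and the rest of this helper,
# including main()) is not included in this excerpt. The sketch below is a
# hedged guess at its shape, based only on that comment; the argument layout
# (an `args.actions` list) is an assumption, not the original implementation.
def should_run_any_action(args, action_names):
    return any(name in args.actions for name in action_names)
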
if __name__ == '__main__':
main()
avg_line_length: 38.277397 | max_line_length: 204 | alphanum_fraction: 0.711014

hexsha: 16bd3669143df2de8767a9c8bf39a0f217eb03a8 | size: 1,701 | ext: py | lang: Python
path: tests/components/deconz/test_scene.py | licenses: ["Apache-2.0"]
max_stars/max_issues repo: pcaston/core (head e74d946cef7a9d4e232ae9e0ba150d18018cfe33), stars: 1 (2021-07-08), issues: 47 (2021-02-21 to 2022-03-31)
max_forks repo: OpenPeerPower/core (head f673dfac9f2d0c48fa30af37b0a99df9dd6640ee), forks: null
"""deCONZ scene platform tests."""
from unittest.mock import patch
from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON
from openpeerpower.const import ATTR_ENTITY_ID
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_put_request,
setup_deconz_integration,
)
avg_line_length: 27.885246 | max_line_length: 82 | alphanum_fraction: 0.627278

hexsha: 16bd643a28b81f74d29d0b9a43b20d245093f663 | size: 12,716 | ext: py | lang: Python
path: tensorhive/config.py | repo: roscisz/TensorHive | head: 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | licenses: ["Apache-2.0"]
stars: 129 (2017-08-25 to 2022-03-29) | issues: 251 (2017-07-27 to 2022-03-02) | forks: 20 (2017-08-13 to 2022-03-19)
from pathlib import PosixPath
import configparser
from typing import Dict, Optional, Any, List
from inspect import cleandoc
import shutil
import tensorhive
import os
import logging
log = logging.getLogger(__name__)
ConfigInitilizer()
config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')
def display_config(cls):
'''
Displays all uppercase class atributes (class must be defined first)
Example usage: display_config(API_SERVER)
'''
print('[{class_name}]'.format(class_name=cls.__name__))
for key, value in cls.__dict__.items():
if key.isupper():
print('{} = {}'.format(key, value))
def check_env_var(name: str):
'''Makes sure that env variable is declared'''
if not os.getenv(name):
msg = cleandoc(
'''
{env} - undeclared environment variable!
Try this: `export {env}="..."`
''').format(env=name).split('\n')
log.warning(msg[0])
log.warning(msg[1])
avg_line_length: 42.959459 | max_line_length: 118 | alphanum_fraction: 0.681268

hexsha: 16bdc023e7792aee5f95f6dd1ec12e9328dbed08 | size: 4,534 | ext: py | lang: Python
path: model.py | repo: iz2late/baseline-seq2seq | head: 2bfa8981083aed8d30befeb42e41fe78d8ec1641 | licenses: ["MIT"]
stars: 1 (2021-01-06) | issues: null | forks: null
import random
from typing import Tuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
avg_line_length: 37.163934 | max_line_length: 90 | alphanum_fraction: 0.635862

hexsha: 16be469a1debb4ce731178e138eb07a68236018a | size: 7,907 | ext: py | lang: Python
path: ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py | licenses: ["MIT"]
max_stars repo: xuyannus/Machine-Learning-Collection (head 6d5dcd18d4e40f90e77355d56a2902e4c617ecbe), stars: 3,094 (2020-09-20 to 2022-03-31)
max_issues/max_forks repo: xkhainguyen/Machine-Learning-Collection (head 425d196e9477dbdbbd7cc0d19d29297571746ab5), issues: 79 (2020-09-24 to 2022-03-30), forks: 1,529 (2020-09-20 to 2022-03-31)
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import numpy as np
import spacy
import random
from torch.utils.tensorboard import SummaryWriter # to print to tensorboard
from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint
spacy_ger = spacy.load("de")
spacy_eng = spacy.load("en")
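
# tokenize_ger / tokenize_eng are used by the Field objects below but are not
# shown in this excerpt; these are the usual spaCy-based tokenizers such
# scripts define (a hedged reconstruction, not necessarily the original lines).
def tokenize_ger(text):
    return [tok.text for tok in spacy_ger.tokenizer(text)]


def tokenize_eng(text):
    return [tok.text for tok in spacy_eng.tokenizer(text)]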
german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>")
english = Field(
tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>"
)
train_data, valid_data, test_data = Multi30k.splits(
exts=(".de", ".en"), fields=(german, english)
)
german.build_vocab(train_data, max_size=10000, min_freq=2)
english.build_vocab(train_data, max_size=10000, min_freq=2)
### We're ready to define everything we need for training our Seq2Seq model ###
# Training hyperparameters
num_epochs = 100
learning_rate = 0.001
batch_size = 64
# Model hyperparameters
load_model = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_size_encoder = len(german.vocab)
input_size_decoder = len(english.vocab)
output_size = len(english.vocab)
encoder_embedding_size = 300
decoder_embedding_size = 300
hidden_size = 1024 # Needs to be the same for both RNN's
num_layers = 2
enc_dropout = 0.5
dec_dropout = 0.5
# Tensorboard to get nice loss plot
writer = SummaryWriter(f"runs/loss_plot")
step = 0
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_within_batch=True,
sort_key=lambda x: len(x.src),
device=device,
)
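
# The Encoder/Decoder/Seq2Seq classes instantiated below are not included in
# this excerpt. The following is a compact, hedged reconstruction of the usual
# LSTM encoder-decoder used by scripts like this one (teacher-forcing ratio
# and layer shapes are assumptions); the original repository's classes may differ.
class Encoder(nn.Module):
    def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
        super(Encoder, self).__init__()
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
        self.dropout = nn.Dropout(p)

    def forward(self, x):
        # x: (src_len, batch)
        embedding = self.dropout(self.embedding(x))
        _, (hidden, cell) = self.rnn(embedding)
        return hidden, cell


class Decoder(nn.Module):
    def __init__(self, input_size, embedding_size, hidden_size, output_size,
                 num_layers, p):
        super(Decoder, self).__init__()
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(p)

    def forward(self, x, hidden, cell):
        # x: (batch,) -> (1, batch) so the LSTM sees a single time step
        x = x.unsqueeze(0)
        embedding = self.dropout(self.embedding(x))
        outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell))
        predictions = self.fc(outputs).squeeze(0)
        return predictions, hidden, cell


class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, source, target, teacher_force_ratio=0.5):
        target_len, batch_size = target.shape
        target_vocab_size = len(english.vocab)
        outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device)

        hidden, cell = self.encoder(source)
        x = target[0]  # <sos> tokens
        for t in range(1, target_len):
            output, hidden, cell = self.decoder(x, hidden, cell)
            outputs[t] = output
            best_guess = output.argmax(1)
            # Teacher forcing: sometimes feed the ground-truth token instead
            x = target[t] if random.random() < teacher_force_ratio else best_guess
        return outputs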
encoder_net = Encoder(
input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout
).to(device)
decoder_net = Decoder(
input_size_decoder,
decoder_embedding_size,
hidden_size,
output_size,
num_layers,
dec_dropout,
).to(device)
model = Seq2Seq(encoder_net, decoder_net).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
pad_idx = english.vocab.stoi["<pad>"]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
if load_model:
load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)
sentence = "ein boot mit mehreren mnnern darauf wird von einem groen pferdegespann ans ufer gezogen."
for epoch in range(num_epochs):
print(f"[Epoch {epoch} / {num_epochs}]")
checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
save_checkpoint(checkpoint)
model.eval()
translated_sentence = translate_sentence(
model, sentence, german, english, device, max_length=50
)
print(f"Translated example sentence: \n {translated_sentence}")
model.train()
for batch_idx, batch in enumerate(train_iterator):
# Get input and targets and get to cuda
inp_data = batch.src.to(device)
target = batch.trg.to(device)
# Forward prop
output = model(inp_data, target)
        # Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss
        # doesn't take input in that form. For example if we have MNIST we want to have
        # output to be: (N, 10) and targets just (N). Here we can view it in a similar
        # way that we have output_words * batch_size that we want to send into
        # our cost function, so we need to do some reshaping. While we're at it,
        # let's also remove the start token.
output = output[1:].reshape(-1, output.shape[2])
target = target[1:].reshape(-1)
optimizer.zero_grad()
loss = criterion(output, target)
# Back prop
loss.backward()
# Clip to avoid exploding gradient issues, makes sure grads are
# within a healthy range
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
# Gradient descent step
optimizer.step()
# Plot to tensorboard
writer.add_scalar("Training loss", loss, global_step=step)
step += 1
score = bleu(test_data[1:100], model, german, english, device)
print(f"Bleu score {score*100:.2f}")
avg_line_length: 32.539095 | max_line_length: 103 | alphanum_fraction: 0.682433

hexsha: 16beddc32cad55aeba19e5840d544ba51efbce38 | size: 2,533 | ext: py | lang: Python
path: gail_chatbot/light/sqil/light_sentence_imitate_mixin.py | repo: eublefar/gail_chatbot | head: fcb7798515c0e2c031b5127803eb8a9f1fd4f0ab | licenses: ["MIT"]
stars: null | issues: null | forks: null
from typing import Dict, Any, List
import string
from parlai.core.agents import Agent
from parlai.core.message import Message
from random import sample
import pathlib
path = pathlib.Path(__file__).parent.absolute()
avg_line_length: 29.8 | max_line_length: 91 | alphanum_fraction: 0.586656

hexsha: 16bf36b1dcc9b129dcd361097fbc1ea1ea920674 | size: 1,654 | ext: py | lang: Python
path: pytudes/_2021/educative/grokking_the_coding_interview/fast_and_slow_pointers/_1__linked_list_cycle__easy.py | repo: TeoZosa/pytudes | head: 4f01ab20f936bb4b3f42d1946180d4a20fd95fbf | licenses: ["Apache-2.0"]
stars: 1 (2022-02-08) | issues: 62 (2021-04-02 to 2022-03-25) | forks: null
"""https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D
Categories:
- Binary
- Bit Manipulation
- Blind 75
See Also:
- pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py
"""
from pytudes._2021.utils.linked_list import (
ListNode,
NodeType,
convert_list_to_linked_list,
)
def has_cycle(head: NodeType) -> bool:
"""
Args:
head: head of a singly-linked list of nodes
Returns:
whether or not the linked list has a cycle
Examples:
>>> has_cycle(None)
False
>>> head = ListNode("self-edge")
>>> head.next = head
>>> has_cycle(head)
True
>>> head = convert_list_to_linked_list([1,2,3,4,5,6])
>>> has_cycle(head)
False
>>> head.next.next.next.next.next.next = head.next.next
>>> has_cycle(head)
True
>>> head.next.next.next.next.next.next = head.next.next.next
>>> has_cycle(head)
True
"""
slow = fast = head
    while fast is not None and fast.next is not None:  # since fast is always at or ahead of slow
slow = slow.next
fast = fast.next.next
if slow == fast:
return True # found the cycle
else:
return False
main()
avg_line_length: 23.971014 | max_line_length: 82 | alphanum_fraction: 0.605804

hexsha: 16bf4f8f27c28015e220b292e189af4ce08ed99c | size: 4,417 | ext: py | lang: Python
path: httpd.py | repo: whtt8888/TritonHTTPserver | head: 99adf3f1e6c3867bb870cda8434605c59409ea19 | licenses: ["MIT"]
stars: 2 (2019-04-07 to 2019-10-14) | issues: null | forks: null
import sys
import os
import socket
import time
import threading
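
# MyServer and createsocket are used in the __main__ block below but are not
# part of this excerpt. These are hedged, minimal stand-ins added for
# illustration: MyServer only stores the listening address and document root,
# and createsocket sends a fixed placeholder HTTP response. The real
# TritonHTTP request handling is more involved.
class MyServer:
    def __init__(self, port, doc_root, host='0.0.0.0'):
        self.host = host
        self.port = port
        self.doc_root = doc_root


def createsocket(conn, addr):
    """Handle a single client connection (placeholder implementation)."""
    with conn:
        data = conn.recv(4096)
        if data:
            body = b'placeholder response\r\n'
            header = 'HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n'.format(len(body))
            conn.sendall(header.encode() + body)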
if __name__ == '__main__':
input_port = int(sys.argv[1])
input_doc_root = sys.argv[2]
server = MyServer(input_port, input_doc_root)
# Add code to start your server here
threads = []
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((server.host, server.port))
s.listen()
while True:
conn, addr = s.accept()
            t = threading.Thread(target=createsocket, args=(conn, addr))
t.start()
threads.append(t)
for t in threads:
t.join()
avg_line_length: 31.105634 | max_line_length: 85 | alphanum_fraction: 0.479511

hexsha: 16c081effc971dd24b22b938117db5e30575dfca | size: 1,179 | ext: py | lang: Python
path: pf_pweb_sourceman/task/git_repo_man.py | repo: problemfighter/pf-pweb-sourceman | head: 827b1d92ac992ec1495b128e99137aab1cfa09a0 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from git import Repo
from pf_pweb_sourceman.common.console import console
from pf_py_file.pfpf_file_util import PFPFFileUtil
avg_line_length: 33.685714 | max_line_length: 84 | alphanum_fraction: 0.63274

hexsha: 16c22952eef284ef2bbd4cfa4e2bbaa9380b0ceb | size: 2,969 | ext: py | lang: Python
path: tool/remote_info.py | repo: shanmukmichael/Asset-Discovery-Tool | head: 82c3f2f5cecb394a1ad87b2e504fbef219a466fd | licenses: ["MIT"]
stars: null | issues: null | forks: null
import socket
import paramiko
import json
Hostname = '34.224.2.243'
Username = 'ec2-user'
key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem'
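
# is_connected() is called below but not defined in this excerpt. A minimal,
# assumed reachability check on the SSH port is sketched here; the original
# tool may implement this differently.
def is_connected(host=Hostname, port=22, timeout=5):
    try:
        with socket.create_connection((host, port), timeout=timeout):
            print('{} is reachable on port {}'.format(host, port))
            return True
    except OSError:
        print('{} is not reachable on port {}'.format(host, port))
        return False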
is_connected()
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=Hostname, username=Username, key_filename=key)
except paramiko.AuthenticationException:
print("Failed to connect to {} due to wrong username/password".format(Hostname))
exit(1)
except:
print("Failed to connect to {} ".format(Hostname))
exit(2)
# commands
_, stdout_1, _ = ssh.exec_command("hostname")
_, stdout_2, _ = ssh.exec_command("hostname -I | awk '{print $1}'")
_, stdout_3, _ = ssh.exec_command("cat /sys/class/net/eth0/address")
_, stdout_4, _ = ssh.exec_command(
"awk -F= '$1=={} {{ print $2 ;}}' /etc/os-release".format('"NAME"'))
_, stdout_5, _ = ssh.exec_command("whoami")
_, stdout_6, _ = ssh.exec_command("last -F")
_, stdout_7, _ = ssh.exec_command("netstat -tnpa | grep 'ESTABLISHED.*sshd'")
#_, stdout_8, _ = ssh.exec_command("sudo {}/24".format())
# egrep -o '([0-9]{1,3}\.){3}[0-9]{1,3}' --IP-address
# ---------------------------------
# ----------------------------------
avg_line_length: 29.39604 | max_line_length: 84 | alphanum_fraction: 0.594139

hexsha: 16c3880f871252c2ad2ebcf1bd3aca25678856cb | size: 16,099 | ext: py | lang: Python
path: hvac/api/secrets_engines/kv_v2.py | repo: Famoco/hvac | head: cdc1854385dd981de38bcb6350f222a52bcf3923 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""KvV2 methods module."""
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase
DEFAULT_MOUNT_POINT = 'secret'
avg_line_length: 42.254593 | max_line_length: 127 | alphanum_fraction: 0.638611

hexsha: 16c3b1e6ee4edc3e7c6e66622f8ee4afa8a44dad | size: 512 | ext: py | lang: Python
path: android/install-all.py | licenses: ["MIT"]
max_stars repo: SaschaWillems/vulkan_slim (head 642bcf1eaba8bbcb94a8bec61f3454c597af72f9), stars: 28 (2017-09-04 to 2021-09-18)
max_issues/max_forks repo: 0xm1nam0/Vulkan (head ea726e617f71f5ff5c1503bca134b2a7ad17a1a7), issues: null, forks: 1 (2018-07-20)
# Install all examples to connected device(s)
import subprocess
import sys
answer = input("Install all vulkan examples to attached device, this may take some time! (Y/N)").lower() == 'y'
if answer:
BUILD_ARGUMENTS = ""
for arg in sys.argv[1:]:
if arg == "-validation":
BUILD_ARGUMENTS += "-validation"
if subprocess.call(("python build-all.py -deploy %s" % BUILD_ARGUMENTS).split(' ')) != 0:
print("Error: Not all examples may have been installed!")
sys.exit(-1)
avg_line_length: 36.571429 | max_line_length: 111 | alphanum_fraction: 0.644531

hexsha: 16c4d3d9ff39c41395ea4a9779719c084f2fc55a | size: 1,726 | ext: py | lang: Python
path: main.py | repo: juangallostra/moonboard | head: d4a35857d480ee4bed06faee44e0347e1070b6b8 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from generators.ahoughton import AhoughtonGenerator
from render_config import RendererConfig
from problem_renderer import ProblemRenderer
from moonboard import get_moonboard
from adapters.default import DefaultProblemAdapter
from adapters.crg import CRGProblemAdapter
from adapters.ahoughton import AhoughtonAdapter
import json
if __name__ == "__main__":
main()
avg_line_length: 30.280702 | max_line_length: 113 | alphanum_fraction: 0.707995

hexsha: 16c4fdb052f6373448ef88971819f508813eb2d7 | size: 5,228 | ext: py | lang: Python
path: GearBot/Util/Pages.py | repo: JohnyTheCarrot/GearBot | head: 8a32bfc79f997a154c9abccbf6742a79fc5257b0 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import discord
from Util import Utils, Emoji, Translator
page_handlers = dict()
known_messages = dict()
def basic_pages(pages, page_num, action):
if action == "PREV":
page_num -= 1
elif action == "NEXT":
page_num += 1
if page_num < 0:
page_num = len(pages) - 1
if page_num >= len(pages):
page_num = 0
page = pages[page_num]
return page, page_num
def paginate(input, max_lines=20, max_chars=1900, prefix="", suffix=""):
max_chars -= len(prefix) + len(suffix)
lines = str(input).splitlines(keepends=True)
pages = []
page = ""
count = 0
for line in lines:
if len(page) + len(line) > max_chars or count == max_lines:
if page == "":
# single 2k line, split smaller
words = line.split(" ")
for word in words:
if len(page) + len(word) > max_chars:
pages.append(f"{prefix}{page}{suffix}")
page = f"{word} "
else:
page += f"{word} "
else:
pages.append(f"{prefix}{page}{suffix}")
page = line
count = 1
else:
page += line
count += 1
pages.append(f"{prefix}{page}{suffix}")
return pages
def paginate_fields(input):
pages = []
for page in input:
page_fields = dict()
for name, content in page.items():
page_fields[name] = paginate(content, max_chars=1024)
pages.append(page_fields)
real_pages = []
for page in pages:
page_count = 0
page_fields = dict()
for name, parts in page.items():
base_name = name
            if len(parts) == 1:
if page_count + len(name) + len(parts[0]) > 4000:
real_pages.append(page_fields)
page_fields = dict()
page_count = 0
page_fields[name] = parts[0]
page_count += len(name) + len(parts[0])
else:
for i in range(len(parts)):
part = parts[i]
name = f"{base_name} ({i+1}/{len(parts)})"
if page_count + len(name) + len(part) > 3000:
real_pages.append(page_fields)
page_fields = dict()
page_count = 0
page_fields[name] = part
page_count += len(name) + len(part)
real_pages.append(page_fields)
return real_pages
def save_to_disc():
Utils.saveToDisk("known_messages", known_messages)
def load_from_disc():
global known_messages
known_messages = Utils.fetch_from_disk("known_messages")
avg_line_length: 33.299363 | max_line_length: 213 | alphanum_fraction: 0.55394

hexsha: 16c7d2d61e641808d594577e77047ea93c4d6c86 | size: 8,007 | ext: py | lang: Python
repo: msc-acse/acse-9-independent-research-project-Wade003 | head: cfcba990d52ccf535171cf54c0a91b184db6f276 | licenses: ["MIT"]
max_stars path: software/Opal/spud/diamond/build/lib.linux-x86_64-2.7/diamond/dialogs.py, stars: 2 (2020-05-11)
max_issues/max_forks path: software/multifluids_icferst/libspud/diamond/build/lib.linux-x86_64-2.7/diamond/dialogs.py, issues: null, forks: 2 (2020-05-21 to 2020-10-28)
#!/usr/bin/env python
# This file is part of Diamond.
#
# Diamond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diamond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import traceback
import gtk
import pygtkconsole
def prompt(parent, message, type = gtk.MESSAGE_QUESTION, has_cancel = False):
"""
Display a simple Yes / No dialog. Returns one of gtk.RESPONSE_{YES,NO,CANCEL}.
"""
prompt_dialog = gtk.MessageDialog(parent, 0, type, gtk.BUTTONS_NONE, message)
prompt_dialog.add_buttons(gtk.STOCK_YES, gtk.RESPONSE_YES, gtk.STOCK_NO, gtk.RESPONSE_NO)
if has_cancel:
prompt_dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
prompt_dialog.connect("response", prompt_response)
prompt_dialog.run()
return prompt_response.response
def long_message(parent, message):
"""
Display a message prompt, with the message contained within a scrolled window.
"""
message_dialog = gtk.Dialog(parent = parent, buttons = (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
message_dialog.set_default_size(400, 300)
message_dialog.connect("response", close_dialog)
scrolled_window = gtk.ScrolledWindow()
message_dialog.vbox.add(scrolled_window)
scrolled_window.show()
scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
text_view = gtk.TextView()
scrolled_window.add(text_view)
text_view.show()
text_view.get_buffer().set_text(message)
text_view.set_cursor_visible(False)
text_view.set_property("editable", False)
text_view.set_property("height-request", 180)
text_view.set_property("width-request", 240)
message_dialog.run()
return
def error(parent, message):
"""
Display an error message.
"""
error_dialog = gtk.MessageDialog(parent, 0, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, message)
error_dialog.connect("response", close_dialog)
error_dialog.run()
return
def error_tb(parent, message):
"""
Display an error message, together with the last traceback.
"""
tb = traceback.format_exception(sys.exc_info()[0] ,sys.exc_info()[1], sys.exc_info()[2])
tb_msg = ""
for tbline in tb:
tb_msg += tbline
long_message(parent, tb_msg + "\n" + message)
return
def get_filename(title, action, filter_names_and_patterns = {}, folder_uri = None):
"""
Utility function to get a filename.
"""
if action == gtk.FILE_CHOOSER_ACTION_SAVE:
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK)
elif action == gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER:
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_NEW,gtk.RESPONSE_OK)
else:
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK)
filew = gtk.FileChooserDialog(title=title, action=action, buttons=buttons)
filew.set_default_response(gtk.RESPONSE_OK)
if not folder_uri is None:
filew.set_current_folder_uri("file://" + os.path.abspath(folder_uri))
for filtername in filter_names_and_patterns:
filter = gtk.FileFilter()
filter.set_name(filtername)
filter.add_pattern(filter_names_and_patterns[filtername])
filew.add_filter(filter)
allfilter = gtk.FileFilter()
allfilter.set_name("All known files")
for filtername in filter_names_and_patterns:
allfilter.add_pattern(filter_names_and_patterns[filtername])
filew.add_filter(allfilter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
filew.add_filter(filter)
result = filew.run()
if result == gtk.RESPONSE_OK:
filename = filew.get_filename()
filtername = filew.get_filter().get_name()
filew.destroy()
return filename
else:
filew.destroy()
return None
def console(parent, locals = None):
"""
Launch a python console.
"""
console_dialog = gtk.Dialog(parent = parent, buttons = (gtk.STOCK_QUIT, gtk.RESPONSE_ACCEPT))
console_dialog.set_default_size(400, 300)
console_dialog.connect("response", close_dialog)
stdout = sys.stdout
stderr = sys.stderr
console_widget = pygtkconsole.GTKInterpreterConsole(locals)
console_dialog.vbox.add(console_widget)
console_widget.show()
console_dialog.run()
sys.stdout = stdout
sys.stderr = stderr
return
def prompt_response(dialog, response_id):
"""
Signal handler for dialog response signals. Stores the dialog response in the
function namespace, to allow response return in other functions.
"""
if response_id == gtk.RESPONSE_DELETE_EVENT:
response_id = gtk.RESPONSE_CANCEL
prompt_response.response = response_id
close_dialog(dialog, response_id)
return
def close_dialog(dialog, response_id = None):
"""
Signal handler for dialog reponse or destroy signals. Closes the dialog.
"""
dialog.destroy()
return
avg_line_length: 28.193662 | max_line_length: 95 | alphanum_fraction: 0.723242

hexsha: 16c86dba44c4d72104ae5760fa8ff0a89daa4441 | size: 5,793 | ext: py | lang: Python
path: src/mazes.py | repo: tim-fi/pyxel_games | head: 3df9d7e1f3d5436d2051db3f5783bdeab916c054 | licenses: ["Unlicense"]
stars: 2 (2021-04-03 to 2021-12-27) | issues: null | forks: null
from __future__ import annotations
from dataclasses import dataclass, field, InitVar
from typing import List, Tuple, Iterator, Iterable, Optional
from random import choice
import pyxel
# -------------------------------------------------------
# Types
# -------------------------------------------------------
Maze = Tuple[int, ...]
# -------------------------------------------------------
# Constants
# -------------------------------------------------------
SCALE = 3
BOARD_WIDTH = 32
BOARD_HEIGHT = 32
CELL_SIZE = 6
CELL_COLOR = 15
WALL_SIZE = 1
WALL_COLOR = 5
# Flags
UP = 1 << 0
LEFT = 1 << 1
DOWN = 1 << 2
RIGHT = 1 << 3
VISTED = 1 << 4
# Calculated
N_CELLS = BOARD_WIDTH * BOARD_HEIGHT
BLOCK_SIZE = CELL_SIZE + WALL_SIZE * 2
WINDOW_WIDTH = BOARD_WIDTH * BLOCK_SIZE
WINDOW_HEIGHT = BOARD_HEIGHT * BLOCK_SIZE
NEIGHBORS = ((0, -1), (-1, 0), (0, 1), (1, 0))
# -------------------------------------------------------
# Maze
# -------------------------------------------------------
# -------------------------------------------------------
# Application
# -------------------------------------------------------
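
# The maze-generation code and the App class this entry point expects are not
# included in this excerpt. The sketch below is a hedged stand-in that only
# opens a window sized by the constants above and clears it each frame; the
# real game logic (maze carving, cell and wall rendering) is omitted.
class App:
    def __init__(self) -> None:
        pyxel.init(WINDOW_WIDTH, WINDOW_HEIGHT)

    def run(self) -> None:
        pyxel.run(self.update, self.draw)

    def update(self) -> None:
        if pyxel.btnp(pyxel.KEY_Q):
            pyxel.quit()

    def draw(self) -> None:
        pyxel.cls(WALL_COLOR)
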
if __name__ == '__main__':
App().run()
avg_line_length: 32.544944 | max_line_length: 79 | alphanum_fraction: 0.468324

hexsha: 16c8cf672763555c8ebe97c11704c5a42703427b | size: 5,536 | ext: py | lang: Python
path: bobjiang/settings.py | repo: bobjiangps/django-blog | head: 6afd36fa96c5a027546575b362b0a481c5d7c1a5 | licenses: ["MIT"]
stars: 3 (2019-10-25 to 2020-01-05) | issues: 9 (2020-05-10 to 2022-03-11) | forks: 3 (2019-02-11 to 2020-01-05)
"""
Django settings for bobjiang project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(BASE_DIR, "store.json"), "r") as store_file:
STORED = json.load(store_file)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = STORED['secret_key']
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
DEBUG = False
RECORD_VISITOR = True
# RECORD_VISITOR = False
ALLOWED_HOSTS = ['*',]
APPEND_SLASH = True
# Application definition
INSTALLED_APPS = [
'haystack',
'blog.apps.BlogConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'comments',
'ckeditor',
'ckeditor_uploader',
'tool',
'accounting',
#'xadmin',
#'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bobjiang.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'bobjiang.context_processors.device'
],
},
},
]
WSGI_APPLICATION = 'bobjiang.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': STORED['db_name'],
'USER': STORED['db_user'],
'PASSWORD': STORED['db_pw'],
'HOST': '127.0.0.1',
'PORT': 3306,
'OPTIONS': {
'autocommit': True,
},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
#STATIC_ROOT = '/home/bob/djproject/bobjiang/blog/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CKEDITOR_UPLOAD_PATH = 'upload/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_RESTRICT_BY_USER = True
CKEDITOR_CONFIGS = {
'default': {
'toolbar': (['div', 'Source', '-', 'Save', 'NewPage', 'Preview', '-', 'Templates'],
['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-','Print','SpellChecker','Scayt'],
['Undo', 'Redo', '-', 'Find', 'Replace', '-', 'SelectAll', 'RemoveFormat','-','Maximize', 'ShowBlocks', '-',"CodeSnippet", 'Subscript', 'Superscript'],
['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton',
'HiddenField'],
['Bold', 'Italic', 'Underline', 'Strike', '-'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', 'Blockquote'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink', 'Anchor'],
['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak'],
['Styles', 'Format', 'Font', 'FontSize'],
['TextColor', 'BGColor'],
),
'extraPlugins': 'codesnippet',
}
}
# haystack
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
},
}
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 5
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
| 28.984293 | 171 | 0.638728 |
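The settings module above pulls its secrets from a store.json file beside BASE_DIR, which is not part of this record. A minimal sketch of the shape that file must have, inferred from the keys the settings read (secret_key, db_name, db_user, db_pw) — every value below is a placeholder, not a real credential:

import json

example_store = {
    "secret_key": "replace-me",   # placeholder Django SECRET_KEY
    "db_name": "blog",            # placeholder MySQL database name
    "db_user": "blog_user",       # placeholder MySQL user
    "db_pw": "replace-me",        # placeholder password
}

with open("store.json", "w") as store_file:
    json.dump(example_store, store_file, indent=4)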
16c94789f75ac4c3a4caedf7d0832ce6641802d7
| 671 |
py
|
Python
|
Users/models.py
|
titusnjuguna/FreeDom
|
204b3d06ba66e6e8a04af976a25c3c1b7c070f75
|
[
"MIT"
] | 1 |
2022-02-10T17:54:53.000Z
|
2022-02-10T17:54:53.000Z
|
Users/models.py
|
titusnjuguna/FreeDom
|
204b3d06ba66e6e8a04af976a25c3c1b7c070f75
|
[
"MIT"
] | null | null | null |
Users/models.py
|
titusnjuguna/FreeDom
|
204b3d06ba66e6e8a04af976a25c3c1b7c070f75
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
| 29.173913 | 60 | 0.588674 |
16c9a5ddd1d3e1f33c18bfd269bc6097b27aa5a2
| 2,281 |
py
|
Python
|
dvc/__init__.py
|
zjj2wry/dvc
|
c9df567938eefd7b1f5b094c15f04e5ce704aa36
|
[
"Apache-2.0"
] | null | null | null |
dvc/__init__.py
|
zjj2wry/dvc
|
c9df567938eefd7b1f5b094c15f04e5ce704aa36
|
[
"Apache-2.0"
] | null | null | null |
dvc/__init__.py
|
zjj2wry/dvc
|
c9df567938eefd7b1f5b094c15f04e5ce704aa36
|
[
"Apache-2.0"
] | null | null | null |
"""
DVC
----
Make your data science projects reproducible and shareable.
"""
import os
import warnings
VERSION_BASE = '0.23.2'
__version__ = VERSION_BASE
PACKAGEPATH = os.path.abspath(os.path.dirname(__file__))
HOMEPATH = os.path.dirname(PACKAGEPATH)
VERSIONPATH = os.path.join(PACKAGEPATH, 'version.py')
def _update_version_file():
"""Dynamically update version file."""
from git import Repo
from git.exc import InvalidGitRepositoryError
try:
repo = Repo(HOMEPATH)
except InvalidGitRepositoryError:
return __version__
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=6)
dirty = '.mod' if repo.is_dirty() else ''
ver = '{}+{}{}'.format(__version__, short_sha, dirty)
# Write a helper file, that will be installed with the package
# and will provide a true version of the installed dvc
with open(VERSIONPATH, 'w+') as fobj:
fobj.write('# AUTOGENERATED by dvc/__init__.py\n')
fobj.write('version = "{}"\n'.format(ver))
return ver
def _remove_version_file():
"""Remove version.py so that it doesn't get into the release."""
if os.path.exists(VERSIONPATH):
os.unlink(VERSIONPATH)
if os.path.exists(os.path.join(HOMEPATH, 'setup.py')):
# dvc is run directly from source without installation or
# __version__ is called from setup.py
if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' \
and os.getenv('TRAVIS_TAG', '') == '':
__version__ = _update_version_file()
else: # pragma: no cover
_remove_version_file()
else: # pragma: no cover
# dvc was installed with pip or something. Hopefully we have our
# auto-generated version.py to help us provide a true version
from dvc.version import version
__version__ = version
VERSION = __version__
# Ignore numpy's runtime warnings: https://github.com/numpy/numpy/pull/432.
# We don't directly import numpy, but our dependency networkx does, causing
# these warnings in some environments. Luckily these warnings are benign and
# we can simply ignore them so that they don't show up when you are using dvc.
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
| 32.126761 | 78 | 0.702762 |
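A small worked sketch of the version string that _update_version_file() above builds; the six-character sha and the clean working tree are illustrative assumptions, while the format string and the generated file contents follow the module:

VERSION_BASE = '0.23.2'
short_sha = 'abc123'   # hypothetical output of `git rev-parse --short=6 HEAD`
dirty = ''             # '' for a clean tree, '.mod' when repo.is_dirty()

ver = '{}+{}{}'.format(VERSION_BASE, short_sha, dirty)
assert ver == '0.23.2+abc123'

# The generated version.py would then contain:
#   # AUTOGENERATED by dvc/__init__.py
#   version = "0.23.2+abc123"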
16c9bf4375ba49f6aaa19ea289549cfbf3ed1092
| 9,092 |
py
|
Python
|
pkg_dir/src/utils/notion_utils.py
|
robperch/robase_datalysis
|
343cb59b16630ca776bd941897ab8da63f20bfe1
|
[
"MIT"
] | 2 |
2022-01-09T19:18:57.000Z
|
2022-01-09T19:19:04.000Z
|
pkg_dir/src/utils/notion_utils.py
|
robperch/robasecode
|
343cb59b16630ca776bd941897ab8da63f20bfe1
|
[
"MIT"
] | 4 |
2022-01-17T02:46:24.000Z
|
2022-02-20T23:04:05.000Z
|
pkg_dir/src/utils/notion_utils.py
|
robperch/robasecode
|
343cb59b16630ca776bd941897ab8da63f20bfe1
|
[
"MIT"
] | null | null | null |
## MODULE WITH UTIL FUNCTIONS - NOTION
"----------------------------------------------------------------------------------------------------------------------"
####################################################### Imports ########################################################
"----------------------------------------------------------------------------------------------------------------------"
## Standard library imports
import requests
## Third party imports
import pandas as pd
## Local application imports
from pkg_dir.config.config import (
creds_file_path as crds_loc,
)
from pkg_dir.src.utils.general_utils import (
read_yaml,
)
"----------------------------------------------------------------------------------------------------------------------"
####################################################### Functions ######################################################
"----------------------------------------------------------------------------------------------------------------------"
## Read notion database with api
def notion_api_call(db_api_url, db_id, headers):
"""
Read notion database with api
:param db_api_url (string): base url provided by Notion to make api calls
:param db_id (string): unique id of the database that will be read
:param headers (dictionary): dict with authorization and version info
    :return req (requests.Response): response after calling Notion's API
"""
## Configuring reading URL
read_url = db_api_url + db_id + "/query"
## Requesting info via the API
req = requests.request(
"POST",
read_url,
headers=headers
)
## Verifying API call status
print("API interaction status code: ", req.status_code)
return req
## Calling a Notion database as a json via Notion's API
def get_notion_db_json(db_id):
"""
Calling a Notion database as a json via Notion's API
:param db_id (string): unique id of the database that will be called
:return db_json (json): json with the notion's db contents
"""
## Reading credentials from yaml file
yaml_file = read_yaml(crds_loc)
notion_version = yaml_file["notion_api"]["notion_version"]
db_api_url = yaml_file["notion_api"]["db_api_url"]
api_key = yaml_file["notion_api"]["api_key"]
## Building headers for the API call
headers = {
"Authorization": "Bearer " + api_key,
"Notion-Version": notion_version
}
## Calling notion's api
req = notion_api_call(db_api_url, db_id, headers)
## Converting the api response to a json
db_json = req.json()
return db_json
## Creating a schema of the notion database that was read
def create_notion_db_schema(db_json, relevant_properties):
"""
    Creating a schema of the notion database that was read
:param db_json (json): json object obtained by calling notion's api
:param relevant_properties (list): list of string with the names of the relevant properties
:return db_schema (dictionary): schema of the table that includes the properties' data type
"""
## Selecting a sample entry to go over all of it's properties
sample_entry = db_json["results"][0]["properties"]
    ## Building dictionary (schema) of the relevant properties and their datatypes
db_schema = {
prop: {
"data_type": sample_entry[prop]["type"]
}
for prop in sample_entry
if prop in relevant_properties
}
# print(db_schema)
return db_schema
## Building the blueprint dictionary for the dataframe (orient=index)
def notion_db_blueprint_df(db_json, db_schema, index_prop):
"""
    Building the blueprint dictionary for the dataframe (orient=index)
:param db_json (json): json object obtained by calling notion's api
    :param db_schema (dictionary): schema of the table that includes the properties' data type
:param index_prop (string): name of the property that will serve as the df's index
:return df_dict (dict): dictionary that will be used to create a dataframe with the json contents
"""
## Empty dictionary that will store all the results
df_dict = {}
## Iterating over every row in the dataframe
for row in db_json["results"]:
## Defining the table's base attributes
#### All properties contained in the notion db
row_props = row["properties"]
#### Name of the index; key attribute in the notion db
row_name = row_props[index_prop]["title"][0]["plain_text"]
#### Empty list to store all the row contents
row_contents = []
## Iterating over every relevant property in the table
for col in db_schema:
## Identifying the datatype of the property
data_type = db_schema[col]["data_type"]
## Set of conditions to determine how the row will be treated
#### Skipping the index row
if data_type == "title":
continue
#### Searching for data in specific locations for special data types (1)
elif data_type in ["select", "person", "created_by"]:
try:
row_contents.append(row_props[col][data_type]["name"])
except:
row_contents.append("No_data")
#### Searching for data in specific locations for special data types (2)
elif data_type in ["rich_text"]:
try:
row_contents.append(row_props[col][data_type][0]["text"]["content"])
except:
row_contents.append("No_data")
#### Searching for data in specific locations for special data types (2)
elif data_type in ["formula"]:
try:
#### Applying conditions based on the type of formula result
if row_props[col][data_type]["type"] == "string":
row_contents.append(row_props[col][data_type]["string"])
elif row_props[col][data_type]["type"] == "number":
row_contents.append(row_props[col][data_type]["number"])
except:
row_contents.append("No_data")
#### General procedure to find data
else:
row_contents.append(row_props[col][db_schema[col]["data_type"]])
## Saving the row contents gathered
df_dict[row_name] = row_contents
return df_dict
## Obtaining a dataframe from a notion database
def notion_json_to_df(db_json, relevant_properties):
"""
Obtaining a dataframe from a notion database
:param db_json (json): json object obtained by calling notion's api
:param relevant_properties (list): list of string with the names of the relevant properties
    :return df_n (dataframe): resulting dataframe created based on the blueprint generated
"""
## General parameters needed to build the dataframe
#### Database schema
db_schema = create_notion_db_schema(db_json, relevant_properties)
#### Property that will be used as the dataframe's index
index_prop = [prop for prop in db_schema if db_schema[prop]["data_type"] == "title"][0]
    ## Building the blueprint dictionary for the dataframe (orient=index)
df_dict = notion_db_blueprint_df(db_json, db_schema, index_prop)
## Creating dataframe with the resulting blueprint dictionary
    #### Creating dataframe
df_n = pd.DataFrame.from_dict(df_dict, orient="index")
#### Inserting the table's index as a column at the end of the df
df_n.insert(
df_n.shape[1],
index_prop,
df_n.index
)
#### Resetting index
df_n.reset_index(inplace=True, drop=True)
#### Adjusting column names
df_n.columns = [col_n for col_n in db_schema]
return df_n
## Obtaining a Notion database as dataframe with the selected columns
def notion_db_to_df(db_id, relevant_properties):
"""
Obtaining a Notion database as dataframe with the selected columns
:param db_id (string): unique id to identify the notion database
:param relevant_properties (list): list of string with the names of the relevant properties
    :return df_n (dataframe): resulting dataframe created based on the blueprint generated
"""
## Calling a Notion database as a json via Notion's API
db_json = get_notion_db_json(db_id)
## Obtaining a dataframe from a notion database
df_n = notion_json_to_df(db_json, relevant_properties)
return df_n
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
## END OF FILE ##
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
| 30.006601 | 120 | 0.569182 |
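A hedged usage sketch for notion_db_to_df() defined above; the database id and property names are placeholders, and the only assumptions beyond the source are that the package is importable and that one of the listed properties is the database's title property (which becomes the index):

from pkg_dir.src.utils.notion_utils import notion_db_to_df

db_id = "00000000000000000000000000000000"          # placeholder Notion database id
relevant_properties = ["Name", "Status", "Owner"]   # placeholder property names

df = notion_db_to_df(db_id, relevant_properties)
print(df.head())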
16caf6d3ac2e6621185a4d16c03069163552a572
| 8,371 |
py
|
Python
|
libpermian/issueanalyzer/test_baseissue.py
|
velezd/permian
|
b52189f44c3112ad933a6b1e303a6b30c272651a
|
[
"MIT"
] | null | null | null |
libpermian/issueanalyzer/test_baseissue.py
|
velezd/permian
|
b52189f44c3112ad933a6b1e303a6b30c272651a
|
[
"MIT"
] | 9 |
2022-02-07T14:14:10.000Z
|
2022-03-22T09:17:16.000Z
|
libpermian/issueanalyzer/test_baseissue.py
|
velezd/permian
|
b52189f44c3112ad933a6b1e303a6b30c272651a
|
[
"MIT"
] | 3 |
2022-01-20T09:17:39.000Z
|
2022-03-08T00:35:58.000Z
|
import unittest
import logging
import contextlib
from libpermian.settings import Settings
from .proxy import IssueAnalyzerProxy
from .base import BaseAnalyzer, BaseIssue
from .issueset import IssueSet
LOGGER = logging.getLogger('test')
# TrackedResolvedIssue should behave the same way as TrackedUnresolvedIssue
# so just inherit the whole test case to run the very same test
# The update_issue should have no effect when create_issues_instead_of_update
# is set to True.
| 29.896429 | 79 | 0.611158 |
16cb0577b93ac4b27ff6f443a2d517ea18cbf9f7
| 6,421 |
py
|
Python
|
naplib/alignment/prosodylab_aligner/__main__.py
|
gavinmischler/naplib-python
|
8cd7a0fc700f1c07243169ec42fc087955885adc
|
[
"MIT"
] | 1 |
2022-03-02T20:54:23.000Z
|
2022-03-02T20:54:23.000Z
|
naplib/alignment/prosodylab_aligner/__main__.py
|
gavinmischler/gavlib
|
cacf9180b1442e4aed98b6182d586747a6d6ef90
|
[
"MIT"
] | null | null | null |
naplib/alignment/prosodylab_aligner/__main__.py
|
gavinmischler/gavlib
|
cacf9180b1442e4aed98b6182d586747a6d6ef90
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011-2014 Kyle Gorman and Michael Wagner
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Command-line driver for the module
"""
import logging
import os
import sys
import yaml
from bisect import bisect
from shutil import copyfile
from textgrid import MLF
from corpus import Corpus
from aligner import Aligner
from archive import Archive
from utilities import splitname, resolve_opts, \
ALIGNED, CONFIG, HMMDEFS, MACROS, SCORES
from argparse import ArgumentParser
DICTIONARY = "eng.dict"
MODEL = "eng.zip"
LOGGING_FMT = "%(message)s"
# parse arguments
argparser = ArgumentParser(prog="{} -m aligner".format(sys.executable),
description="Prosodylab-Aligner")
argparser.add_argument("-c", "--configuration",
help="config file")
argparser.add_argument("-d", "--dictionary", metavar="DICT", action="append",
help="dictionary file (default: {}) (can specify multiple)".format(DICTIONARY))
argparser.add_argument("-s", "--samplerate", type=int,
help="analysis samplerate (in Hz)")
argparser.add_argument("-e", "--epochs", type=int,
help="# of epochs of training per round")
input_group = argparser.add_argument_group()
input_group.add_argument("-r", "--read",
help="source for a precomputed acoustic model")
input_group.add_argument("-t", "--train",
help="directory containing data for training")
output_group = argparser.add_mutually_exclusive_group(required=True)
output_group.add_argument("-a", "--align",
help="directory containing data to align")
output_group.add_argument("-w", "--write",
help="destination for computed acoustic model")
verbosity_group = argparser.add_mutually_exclusive_group()
verbosity_group.add_argument("-v", "--verbose", action="store_true",
help="Verbose output")
verbosity_group.add_argument("-V", "--extra-verbose", action="store_true",
help="Even more verbose output")
args = argparser.parse_args()
# hack to allow proper override of default dictionary
if not args.dictionary:
args.dictionary = [DICTIONARY]
# set up logging
loglevel = logging.WARNING
if args.extra_verbose:
loglevel = logging.DEBUG
elif args.verbose:
loglevel = logging.INFO
logging.basicConfig(format=LOGGING_FMT, level=loglevel)
# input: pick one
if args.train:
if args.read:
logging.error("Cannot train on persistent model.")
exit(1)
logging.info("Preparing corpus '{}'.".format(args.train))
opts = resolve_opts(args)
corpus = Corpus(args.train, opts)
logging.info("Preparing aligner.")
aligner = Aligner(opts)
logging.info("Training aligner on corpus '{}'.".format(args.train))
aligner.HTKbook_training_regime(corpus, opts["epochs"],
flatstart=(args.read is None))
else:
if not args.read:
args.read = MODEL
logging.info("Reading aligner from '{}'.".format(args.read))
# warn about irrelevant flags
if args.configuration:
logging.warning("Ignoring config flag (-c/--configuration).")
args.configuration = None
if args.epochs:
logging.warning("Ignoring epochs flag (-e/--epochs).")
if args.samplerate:
logging.warning("Ignoring samplerate flag (-s/--samplerate).")
args.samplerate = None
# create archive from -r argument
archive = Archive(args.read)
# read configuration file therefrom, and resolve options with it
args.configuration = os.path.join(archive.dirname, CONFIG)
opts = resolve_opts(args)
# initialize aligner and set it to point to the archive data
aligner = Aligner(opts)
aligner.curdir = archive.dirname
# output: pick one
if args.align:
# check to make sure we're not aligning on the training data
if (not args.train) or (os.path.realpath(args.train) !=
os.path.realpath(args.align)):
logging.info("Preparing corpus '{}'.".format(args.align))
corpus = Corpus(args.align, opts)
logging.info("Aligning corpus '{}'.".format(args.align))
aligned = os.path.join(args.align, ALIGNED)
scores = os.path.join(args.align, SCORES)
aligner.align_and_score(corpus, aligned, scores)
logging.debug("Wrote MLF file to '{}'.".format(aligned))
logging.debug("Wrote likelihood scores to '{}'.".format(scores))
logging.info("Writing TextGrids.")
size = MLF(aligned).write(args.align)
if not size:
logging.error("No paths found!")
exit(1)
logging.debug("Wrote {} TextGrids.".format(size))
elif args.write:
# create and populate archive
(_, basename, _) = splitname(args.write)
archive = Archive.empty(basename)
archive.add(os.path.join(aligner.curdir, HMMDEFS))
archive.add(os.path.join(aligner.curdir, MACROS))
# whatever this is, it's not going to work once you move the data
if "dictionary" in opts:
del opts["dictionary"]
with open(os.path.join(archive.dirname, CONFIG), "w") as sink:
yaml.dump(opts, sink)
(basename, _) = os.path.splitext(args.write)
archive_path = os.path.relpath(archive.dump(basename))
logging.info("Wrote aligner to '{}'.".format(archive_path))
# else unreachable
logging.info("Success!")
| 40.13125 | 102 | 0.68167 |
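Two hypothetical invocations of the aligner driver above, wrapped in subprocess so the sketch stays in Python; the corpus directory, model archive, and dictionary names are placeholders, while the flags (-t/-w/-e for training, -r/-a/-d for alignment) come from the argparse definitions in the source:

import sys
import subprocess

# Train an acoustic model on a corpus and archive it:
subprocess.run([sys.executable, "-m", "aligner",
                "-t", "my_corpus/", "-w", "my_model.zip", "-e", "2"])

# Align a corpus with the bundled English model and dictionary:
subprocess.run([sys.executable, "-m", "aligner",
                "-r", "eng.zip", "-a", "my_corpus/", "-d", "eng.dict"])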
16cc459343115a5e0d636bad4bf667af5c4f5d6d
| 4,021 |
py
|
Python
|
init/build_statements.py
|
andgein/sis-2017-winter-olymp
|
e6cf290ab2c24a22ca76949895e2a6cc6d818dc0
|
[
"MIT"
] | null | null | null |
init/build_statements.py
|
andgein/sis-2017-winter-olymp
|
e6cf290ab2c24a22ca76949895e2a6cc6d818dc0
|
[
"MIT"
] | null | null | null |
init/build_statements.py
|
andgein/sis-2017-winter-olymp
|
e6cf290ab2c24a22ca76949895e2a6cc6d818dc0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import codecs
import os
import os.path
import shutil
import subprocess
import logging
import glob
import json
CONTEST_DIR = 'polygon-contest'
INIT_FILE = 'init.txt'
BUILD_DIR = 'build'
LANGUAGE = 'russian'
FILES_DIR = 'files-' + LANGUAGE
if __name__ == '__main__':
main()
| 39.038835 | 123 | 0.607063 |
16cc8a900ca38b32bc2a6bbb0fff269ef5b921da
| 1,430 |
py
|
Python
|
conanfile.py
|
mmurooka/mc_rtc_data
|
bf45279cc59f9d85915cb2a01a84c23e5ce45958
|
[
"BSD-2-Clause"
] | 1 |
2021-04-12T06:02:53.000Z
|
2021-04-12T06:02:53.000Z
|
conanfile.py
|
mmurooka/mc_rtc_data
|
bf45279cc59f9d85915cb2a01a84c23e5ce45958
|
[
"BSD-2-Clause"
] | 3 |
2020-06-18T10:01:15.000Z
|
2021-11-08T12:43:43.000Z
|
conanfile.py
|
mmurooka/mc_rtc_data
|
bf45279cc59f9d85915cb2a01a84c23e5ce45958
|
[
"BSD-2-Clause"
] | 4 |
2020-03-12T08:57:41.000Z
|
2021-09-07T03:07:56.000Z
|
# -*- coding: utf-8 -*-
#
from conans import python_requires
import conans.tools as tools
import os
base = python_requires("Eigen3ToPython/latest@multi-contact/dev")
| 34.878049 | 168 | 0.652448 |
16cd7731b200cbda5815fed9bc8eb8baf3b78188
| 1,217 |
py
|
Python
|
hyperion/migrations/0006_auto_20190218_2251.py
|
ExiaSR/hyperion
|
0b14ef55ed00b964f1966c722f4162c475aa4895
|
[
"MIT"
] | 3 |
2019-01-30T03:50:04.000Z
|
2019-02-20T00:33:05.000Z
|
hyperion/migrations/0006_auto_20190218_2251.py
|
ExiaSR/hyperion
|
0b14ef55ed00b964f1966c722f4162c475aa4895
|
[
"MIT"
] | 173 |
2019-01-30T08:30:54.000Z
|
2019-04-05T19:43:06.000Z
|
hyperion/migrations/0006_auto_20190218_2251.py
|
ExiaSR/hyperion
|
0b14ef55ed00b964f1966c722f4162c475aa4895
|
[
"MIT"
] | 2 |
2019-05-06T22:59:56.000Z
|
2020-09-29T03:13:03.000Z
|
# Generated by Django 2.1.5 on 2019-02-18 22:51
from django.db import migrations, models
import django.db.models.deletion
| 34.771429 | 201 | 0.571898 |
16cdaac129cd705700eab605365385f7b7b8a82c
| 2,236 |
py
|
Python
|
pottan_ocr/utils.py
|
nithyadurai87/pottan-ocr-tamil
|
e455891dc0ddd508d1318abf84fc59cc548873f7
|
[
"MIT"
] | 5 |
2019-05-05T18:26:14.000Z
|
2019-08-02T05:04:12.000Z
|
pottan_ocr/utils.py
|
nithyadurai87/pottan-ocr-tamil
|
e455891dc0ddd508d1318abf84fc59cc548873f7
|
[
"MIT"
] | 3 |
2020-07-17T02:28:11.000Z
|
2021-05-08T21:58:10.000Z
|
pottan_ocr/utils.py
|
nithyadurai87/pottan-ocr-tamil
|
e455891dc0ddd508d1318abf84fc59cc548873f7
|
[
"MIT"
] | 3 |
2020-04-11T19:39:08.000Z
|
2020-12-21T08:44:21.000Z
|
import torch
import json
import numpy as np
from torch.autograd import Variable
import gzip
import yaml
from re import split
from matplotlib import pyplot
config = readYaml('./config.yaml')
def loadTrainedModel( model, opt ):
"""Load a pretrained model into given model"""
print('loading pretrained model from %s' % opt.crnn)
if( opt.cuda ):
stateDict = torch.load(opt.crnn )
else:
stateDict = torch.load(opt.crnn, map_location={'cuda:0': 'cpu'} )
    # Handle checkpoints from some older torch versions that save keys as module.<xyz>; strip the prefix.
if( list( stateDict.keys() )[0][:7] == 'module.' ):
for key in list(stateDict.keys()):
stateDict[ key[ 7:] ] = stateDict[key]
del stateDict[ key ]
model.load_state_dict( stateDict )
    print('Completed loading pre-trained model')
| 24.304348 | 99 | 0.58542 |
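loadTrainedModel() above reads only two attributes from opt — the checkpoint path (opt.crnn) and the CUDA flag (opt.cuda) — so a call site can be as small as the sketch below; the weights path and the model class are placeholders, not names from the source:

import argparse

opt = argparse.Namespace(crnn="weights/crnn.pth", cuda=False)  # placeholder checkpoint path
# model = SomeCRNN(...)            # hypothetical torch.nn.Module matching the checkpoint
# loadTrainedModel(model, opt)     # maps the state dict onto CPU because opt.cuda is False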
16cef8471ab7389079cb6001c00f1c83826a7643
| 1,546 |
py
|
Python
|
pyvips/error.py
|
kleisauke/pyvips
|
ae3b0c09669cfb662e773e8ae69cf589ac15e320
|
[
"MIT"
] | null | null | null |
pyvips/error.py
|
kleisauke/pyvips
|
ae3b0c09669cfb662e773e8ae69cf589ac15e320
|
[
"MIT"
] | null | null | null |
pyvips/error.py
|
kleisauke/pyvips
|
ae3b0c09669cfb662e773e8ae69cf589ac15e320
|
[
"MIT"
] | null | null | null |
# errors from libvips
import sys
import logging
from pyvips import ffi, vips_lib
logger = logging.getLogger(__name__)
_is_PY3 = sys.version_info[0] == 3
if _is_PY3:
text_type = str
else:
text_type = unicode
ffi.cdef('''
const char* vips_error_buffer (void);
void vips_error_clear (void);
''')
def _to_bytes(x):
"""Convert to a byte string.
Convert a Python unicode string to a utf-8-encoded byte string. You must
call this on strings you pass to libvips.
"""
if isinstance(x, text_type):
x = x.encode()
return x
def _to_string(x):
"""Convert to a unicode string.
If x is a byte string, assume it is utf-8 and decode to a Python unicode
string. You must call this on text strings you get back from libvips.
"""
if _is_PY3 and isinstance(x, bytes):
x = x.decode('utf-8')
return x
__all__ = [
'_to_bytes', '_to_string', 'Error',
]
| 20.891892 | 76 | 0.638422 |
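A short round-trip sketch for the two helpers above, following their docstrings; it assumes pyvips (and libvips) is installed so that pyvips.error imports cleanly:

from pyvips.error import _to_bytes, _to_string

s = u'café'
b = _to_bytes(s)      # utf-8 encoded bytes, safe to hand to libvips
t = _to_string(b)     # decoded back to a unicode string
assert t == s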
16cf7d6d5783bc8dc6f881f5646090c8b7e4317c
| 7,584 |
py
|
Python
|
population_estimator/curses_io.py
|
cruzanta/population-estimator
|
cb56c551b615726543d8b1643302be2d30fd593c
|
[
"MIT"
] | 1 |
2019-02-10T01:30:09.000Z
|
2019-02-10T01:30:09.000Z
|
population_estimator/curses_io.py
|
cruzantada/population-estimator
|
cb56c551b615726543d8b1643302be2d30fd593c
|
[
"MIT"
] | null | null | null |
population_estimator/curses_io.py
|
cruzantada/population-estimator
|
cb56c551b615726543d8b1643302be2d30fd593c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Module for painting output on and obtaining input from a text-based terminal
window using the curses library.
"""
import curses
import textwrap
def display_string_with_prompt(screen, first_line_num, a_string, prompt):
"""Paints two strings and accepts input.
Paints two strings on a text-based terminal window. The latter of the two
strings serves as the prompt for the user to enter input.
Args:
screen: A window object that represents the text-based terminal window.
first_line_num: An integer that represents the location along the y-axis
of the terminal window where the first character of the first string
is painted.
a_string: The first string that is painted on the terminal window.
prompt: A string that serves as a prompt for the user to enter input.
Returns:
A string that the user enters in as input.
"""
screen.clear()
output_line = first_line_num
output_line = display_string(screen, a_string, output_line)
output_line += 3
output_line = display_string(screen, prompt, output_line)
screen.refresh()
return screen.getstr(output_line, len(prompt) + 1)
def display_list_items_with_prompt(screen, first_line_num, a_string, a_list,
prompt):
"""Paints a string, each item of a list, and accepts input.
Paints a string, each item of a list, and another string on a text-based
terminal window. Each item of the list is painted on its own line.
The second string serves as a prompt for the user to enter input.
Args:
screen: A window object that represents the text-based terminal window.
first_line_num: An integer that represents the location along the y-axis
of the terminal window where the first character of the first string
is painted.
a_string: The first string that is painted on the terminal window.
a_list: A list whose items are painted on each line of the terminal
window.
prompt: A string that serves as a prompt for the user to enter input.
Returns:
A string that the user enters in as input.
"""
screen.clear()
output_line = first_line_num
output_line = display_string(screen, a_string, output_line)
output_line += 2
output_line = display_list_items(screen, a_list, output_line)
output_line += 1
output_line = display_string(screen, prompt, output_line)
screen.refresh()
return screen.getstr(output_line, len(prompt) + 1)
def display_formatted_dicts_with_prompt(screen, first_line_num, a_string,
list_of_dicts, prompt):
"""Paints a string, each item of each dict in a list, and accepts input.
Paints a string, each item of each dict in a list, and another string on a
text-based terminal window. Each key, value pair of each dict is painted on
its own line with the key and value separated by a colon. The second string
serves as a prompt for the user to enter input.
Args:
screen: A window object that represents the text-based terminal window.
first_line_num: An integer that represents the location along the y-axis
of the terminal window where the first character of the first string
is painted.
a_string: The first string that is painted on the terminal window.
list_of_dicts: A list of dictionaries whose key, value pairs are painted
on their own line of the terminal window.
prompt: A string that serves as a prompt for the user to enter input.
Returns:
A string that the user enters in as input.
"""
screen.clear()
output_line = first_line_num
output_line = display_string(screen, a_string, output_line)
output_line += 2
for dct in list_of_dicts:
output_line = display_formatted_dict(screen, dct, output_line)
output_line += 1
output_line += 1
output_line = display_string(screen, prompt, output_line)
screen.refresh()
return screen.getstr(output_line, len(prompt) + 1)
def get_user_menu_selection(screen, first_line_num, a_string, menu_items,
prompt):
"""Paints a string, a menu, and accepts input.
Paints a string, a menu, and another string on a text-based terminal window.
The menu is composed of the items in a list, and each item is assigned its
own number that represents the order in which the item appears in the menu.
The second string serves as a prompt for the user to enter a number from the
menu.
Args:
screen: A window object that represents the text-based terminal window.
first_line_num: An integer that represents the location along the y-axis
of the terminal window where the first character of the first string
is painted.
a_string: The first string that is painted on the terminal window.
menu_items: A list whose items are painted on each line of the terminal
window as menu options.
prompt: A string that serves as a prompt for the user to enter a number
from the menu.
Returns:
A string representation of the item in 'menu_items' that the user
selects.
"""
# Create a dictionary that contains the items in 'menu_items'. Each item
# is added as a value with an integer key that represents the order in which
# the item will appear in the menu.
item_key = 1
selection_items = {}
for item in menu_items:
selection_items['%s' % (item_key)] = item
item_key += 1
# Display the menu and prompt the user for a selection.
while True:
screen.clear()
output_line = first_line_num
output_line = display_string(screen, a_string, output_line)
output_line += 3
for menu_num in sorted(selection_items.iterkeys()):
item_line = '%s) %s' % (menu_num, selection_items[menu_num])
output_line = display_string(screen, item_line, output_line)
output_line += 1
output_line += 1
output_line = display_string(screen, prompt, output_line)
screen.refresh()
input = screen.getstr(output_line, len(prompt) + 1)
if input not in selection_items.keys():
continue # Force the user to enter a valid selection.
else:
return selection_items[input]
| 36.114286 | 80 | 0.676292 |
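A usage sketch for get_user_menu_selection() above, run under curses.wrapper. It assumes the module's display_* helpers (not shown in this excerpt) are available in the same package, and — since the code uses iterkeys() — a Python 2 interpreter; the menu items and prompts are placeholders:

import curses
from population_estimator.curses_io import get_user_menu_selection

def demo(screen):
    # Returns the menu item matching the number the user types.
    return get_user_menu_selection(
        screen, 2, 'Population estimator', ['By country', 'By region', 'Quit'],
        'Enter a number:')

if __name__ == '__main__':
    print(curses.wrapper(demo))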
16d095db1ff9ef61a032d5e0564695c1cb47f1b3
| 6,986 |
py
|
Python
|
SAP/released_tr_email_sender/ui.py
|
botisko/personal_programs
|
2e234271db438e228b9028b8180a6e833f482104
|
[
"MIT"
] | null | null | null |
SAP/released_tr_email_sender/ui.py
|
botisko/personal_programs
|
2e234271db438e228b9028b8180a6e833f482104
|
[
"MIT"
] | 1 |
2021-01-08T13:25:16.000Z
|
2021-01-08T13:25:16.000Z
|
SAP/released_tr_email_sender/ui.py
|
botisko/personal_programs
|
2e234271db438e228b9028b8180a6e833f482104
|
[
"MIT"
] | 1 |
2021-01-08T12:52:29.000Z
|
2021-01-08T12:52:29.000Z
|
import json
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tr_data import TRData, NO_DATA_MEETS_CRITERIA
from email_text import email_body_template
from helpers import send_email
RECIPIENT = "<email_address>"  # placeholder kept from the source; not a real address
EXCEPTION_FILE = "tr_number_exceptions.json"
| 38.174863 | 134 | 0.61838 |
16d0a3ae5b7a5043417a9ada134eda9cc4f2dd27
| 1,548 |
py
|
Python
|
AI-Practice-Tensorflow-Notes-master/opt/opt4_8_backward.py
|
foochane/Tensorflow-Learning
|
54d210a1286051e9d60c98a62bd63eb070bc0a11
|
[
"Apache-2.0"
] | 2 |
2019-01-23T14:23:17.000Z
|
2019-01-23T14:23:49.000Z
|
AI-Practice-Tensorflow-Notes-master/opt/opt4_8_backward.py
|
foochane/Tensorflow-Learning
|
54d210a1286051e9d60c98a62bd63eb070bc0a11
|
[
"Apache-2.0"
] | null | null | null |
AI-Practice-Tensorflow-Notes-master/opt/opt4_8_backward.py
|
foochane/Tensorflow-Learning
|
54d210a1286051e9d60c98a62bd63eb070bc0a11
|
[
"Apache-2.0"
] | null | null | null |
#coding:utf-8
#0
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import opt4_8_generateds
import opt4_8_forward
STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999
REGULARIZER = 0.01
if __name__=='__main__':
backward()
| 24.967742 | 72 | 0.700258 |
16d13aced6b20979dea691425018aa9f0ea80fb3
| 3,168 |
py
|
Python
|
test/examples/integrated/codec/vip/vip_agent.py
|
rodrigomelo9/uvm-python
|
e3127eba2cc1519a61dc6f736d862a8dcd6fce20
|
[
"Apache-2.0"
] | 140 |
2020-01-18T00:14:17.000Z
|
2022-03-29T10:57:24.000Z
|
test/examples/integrated/codec/vip/vip_agent.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 24 |
2020-01-18T18:40:58.000Z
|
2021-03-25T17:39:07.000Z
|
test/examples/integrated/codec/vip/vip_agent.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 34 |
2020-01-18T12:22:59.000Z
|
2022-02-11T07:03:11.000Z
|
#//
#// -------------------------------------------------------------
#// Copyright 2011 Synopsys, Inc.
#// Copyright 2019-2020 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
from uvm import *
from .vip_sequencer import vip_sequencer
from .vip_driver import vip_driver
from .vip_monitor import vip_monitor
uvm_component_utils(vip_agent)
| 33 | 96 | 0.606376 |
16d1b5218231a945c48c3095503b717e135149a2
| 7,987 |
py
|
Python
|
tests/test_transliterate.py
|
abosoar/camel_tools
|
0a92c06f6dde0063e26df5cbe4d74c2f99b418e0
|
[
"MIT"
] | 1 |
2021-03-23T12:50:47.000Z
|
2021-03-23T12:50:47.000Z
|
tests/test_transliterate.py
|
KaoutharMokrane/camel_tools
|
e9099907835b05d448362bce2cb0e815ac7f5590
|
[
"MIT"
] | null | null | null |
tests/test_transliterate.py
|
KaoutharMokrane/camel_tools
|
e9099907835b05d448362bce2cb0e815ac7f5590
|
[
"MIT"
] | 1 |
2021-01-24T05:06:33.000Z
|
2021-01-24T05:06:33.000Z
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2020 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for camel_tools.transliterate.
"""
from __future__ import absolute_import
import pytest
from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.transliterate import Transliterator
# A mapper that translates lower-case English characters to a lower-case x and
# upper-case English characters to an upper-case X. This makes it easy to
# predict what the transliteration should be.
TEST_MAP = {
u'A-Z': u'X',
u'a-z': u'x',
}
TEST_MAPPER = CharMapper(TEST_MAP, None)
| 33.700422 | 79 | 0.662076 |
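A sketch of what the fixture above implies: with TEST_MAP every lower-case letter maps to 'x' and every upper-case letter to 'X', so expected outputs are trivial to predict. The call below assumes Transliterator exposes a transliterate() method taking a string, which is how the test suite drives it:

from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.transliterate import Transliterator

mapper = CharMapper({u'A-Z': u'X', u'a-z': u'x'}, None)
trans = Transliterator(mapper)
assert trans.transliterate(u'Hello World') == u'Xxxxx Xxxxx'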