Dataset schema (⌀ marks a nullable column):

| column | dtype | range / values |
| --- | --- | --- |
| hexsha | string | length 40 |
| size | int64 | 5 .. 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 .. 251 |
| max_stars_repo_name | string | length 4 .. 130 |
| max_stars_repo_head_hexsha | string | length 40 .. 78 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 .. 251 |
| max_issues_repo_name | string | length 4 .. 130 |
| max_issues_repo_head_hexsha | string | length 40 .. 78 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 | 1 .. 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 .. 251 |
| max_forks_repo_name | string | length 4 .. 130 |
| max_forks_repo_head_hexsha | string | length 40 .. 78 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 1 .. 1.05M |
| avg_line_length | float64 | 1 .. 1.02M |
| max_line_length | int64 | 3 .. 1.04M |
| alphanum_fraction | float64 | 0 .. 1 |

Each record below is rendered as a header line (`hexsha | size | ext | lang`), three provenance lines (`max_stars:` / `max_issues:` / `max_forks:`, each giving `path | repo | head_hexsha | licenses | count | first event | last event`), the file content, and a trailing stats line (`| avg_line_length | max_line_length | alphanum_fraction |`).
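These records can also be consumed programmatically. A minimal sketch, assuming the table describes a Hugging Face dataset with this schema; the dataset id below is a placeholder, not something stated on this page:

```python
from datasets import load_dataset  # pip install datasets

# "bigcode/the-stack" is a placeholder id -- substitute the dataset
# this table actually describes.
ds = load_dataset("bigcode/the-stack", split="train", streaming=True)

for row in ds.take(2):  # stream a couple of records without a full download
    print(row["hexsha"], row["size"], row["ext"], row["max_stars_repo_name"])
    print(row["content"][:120])  # first 120 characters of the stored file
```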
5fc115feb7229821fab8bd49844fdb6a161d73e2 | 408 | py | Python
max_stars: deploy/api/src/schemas/koe_favorite_schema.py | bonybody/2020_hew_app | d09cdafd55348ed70424a443d8619114cae3d27f | ["MIT"] | 1 | 2021-06-03T02:54:51.000Z | 2021-06-03T02:54:51.000Z
max_issues: deploy/api/src/schemas/koe_favorite_schema.py | bonybody/agri | d09cdafd55348ed70424a443d8619114cae3d27f | ["MIT"] | 19 | 2021-01-01T09:48:51.000Z | 2021-04-08T09:11:30.000Z
max_forks: deploy/api/src/schemas/koe_favorite_schema.py | bonybody/agri | d09cdafd55348ed70424a443d8619114cae3d27f | ["MIT"] | 1 | 2021-09-28T11:54:25.000Z | 2021-09-28T11:54:25.000Z
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))  # make the package root importable
from database.database import ma
from models import KoeFavorite
from .user_schema import UserSchema
from .koe_schema import KoeSchema
| 25.5 | 77 | 0.742647 |
5fc3fd1b7cba71af7933022261d214435bda9000 | 2,786 | py | Python
max_stars: results/baseline/parse_rollout.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | ["BSD-3-Clause"] | 14 | 2020-04-03T12:41:50.000Z | 2022-02-04T00:05:01.000Z
max_issues: results/baseline/parse_rollout.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | ["BSD-3-Clause"] | 2 | 2020-03-02T04:32:58.000Z | 2021-09-15T20:02:25.000Z
max_forks: results/baseline/parse_rollout.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | ["BSD-3-Clause"] | 8 | 2020-03-02T10:30:36.000Z | 2021-08-03T02:29:38.000Z
import pickle
import sys
import numpy as np
# Define the valid programs here
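# NOTE: parse_rollout is not defined in this excerpt (the file is truncated
# here). A hypothetical minimal sketch so the snippet runs; the pickle layout
# it assumes is illustrative, not the project's actual format.
def parse_rollout(rollout_fn):
    """Load a pickled rollout file and print a short summary (stub)."""
    with open(rollout_fn, "rb") as f:
        rollout = pickle.load(f)
    print("loaded rollout with", len(rollout), "entries")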
if __name__ == '__main__':
rollout_fn = sys.argv[1]
parse_rollout(rollout_fn=rollout_fn)
| 34.395062 | 401 | 0.623116 |
5fc54e77ecccf0f0df60b5cd1eae650a55b8cc8e | 3,349 | py | Python
max_stars: signatureanalyzer/tests/test_mapping.py | julianhess/getzlab-SignatureAnalyzer | 7f3ce93285c2aaaca88e82fee5a24854c224b453 | ["MIT"] | 37 | 2020-01-16T15:00:27.000Z | 2021-08-22T11:18:56.000Z
max_issues: signatureanalyzer/tests/test_mapping.py | julianhess/getzlab-SignatureAnalyzer | 7f3ce93285c2aaaca88e82fee5a24854c224b453 | ["MIT"] | 18 | 2020-01-27T19:04:00.000Z | 2021-09-26T14:19:39.000Z
max_forks: signatureanalyzer/tests/test_mapping.py | julianhess/getzlab-SignatureAnalyzer | 7f3ce93285c2aaaca88e82fee5a24854c224b453 | ["MIT"] | 8 | 2020-07-07T14:05:44.000Z | 2021-07-30T00:44:36.000Z
import unittest
import pandas as pd
import numpy as np
import os
import tempfile
import shutil
from signatureanalyzer.signatureanalyzer import run_spectra
from signatureanalyzer.bnmf import ardnmf
from signatureanalyzer.utils import file_loader
SPECTRA_ARROW = "../../examples/example_luad_spectra_1.tsv"
SPECTRA_WORD = "../../examples/example_luad_spectra_2.tsv"
if __name__ == '__main__':
unittest.main()
| 39.869048 | 118 | 0.696327 |
5fc5f8dbe2e450d186ac311e88fde09d3e71e36d | 767 | py | Python
max_stars: src/transformer_utils/util/module_utils.py | cfoster0/transformer-utils | 4e4bc61adb331f90bb2a9a394db07e25eda87555 | ["MIT"] | 10 | 2021-07-11T07:32:35.000Z | 2022-02-16T16:46:19.000Z
max_issues: src/transformer_utils/util/module_utils.py | cfoster0/transformer-utils | 4e4bc61adb331f90bb2a9a394db07e25eda87555 | ["MIT"] | null | null | null
max_forks: src/transformer_utils/util/module_utils.py | cfoster0/transformer-utils | 4e4bc61adb331f90bb2a9a394db07e25eda87555 | ["MIT"] | 2 | 2021-05-24T22:50:28.000Z | 2021-09-14T16:14:10.000Z
from .python_utils import make_print_if_verbose
| 23.96875 | 74 | 0.65189 |
5fc75bc9dcba17efcc6fbd5b1c74a679be2c870d | 32,615 | py | Python
max_stars: monetio/models/_rrfs_cmaq_mm.py | zmoon/monetio | c8326750fa5d2404ccec726a5088f9a0e7fd4c4a | ["MIT"] | 1 | 2022-02-18T22:49:23.000Z | 2022-02-18T22:49:23.000Z
max_issues: monetio/models/_rrfs_cmaq_mm.py | zmoon/monetio | c8326750fa5d2404ccec726a5088f9a0e7fd4c4a | ["MIT"] | null | null | null
max_forks: monetio/models/_rrfs_cmaq_mm.py | zmoon/monetio | c8326750fa5d2404ccec726a5088f9a0e7fd4c4a | ["MIT"] | 1 | 2022-02-04T19:09:32.000Z | 2022-02-04T19:09:32.000Z
""" RRFS-CMAQ File Reader """
import numpy as np
import xarray as xr
from numpy import concatenate
from pandas import Series
def open_mfdataset(
fname,
convert_to_ppb=True,
mech="cb6r3_ae6_aq",
var_list=None,
fname_pm25=None,
surf_only=False,
**kwargs
):
    # Like WRF-Chem, accept a var list that determines whether to calculate sums, to speed this up.
    """Method to open RRFS-CMAQ dyn* netCDF files.
Parameters
----------
fname : string or list
fname is the path to the file or files. It will accept hot keys in
strings as well.
convert_to_ppb : boolean
If true the units of the gas species will be converted to ppbv
mech: str
Mechanism to be used for calculating sums. Mechanisms supported:
"cb6r3_ae6_aq"
var_list: list
List of variables to include in output. MELODIES-MONET only reads in
        variables needed to plot in order to save on memory and computation cost,
especially for vertical data. If None, will read in all model data and
calculate all sums.
fname_pm25: string or list
Optional path to the file or files for precalculated PM2.5 sums. It
will accept hot keys in strings as well.
surf_only: boolean
Whether to save only surface data to save on memory and computational
cost (True) or not (False).
Returns
-------
xarray.DataSet
RRFS-CMAQ model dataset in standard format for use in MELODIES-MONET
"""
# Get dictionary of summed species for the mechanism of choice.
dict_sum = dict_species_sums(mech=mech)
if var_list is not None:
# Read in only a subset of variables and only do calculations if needed.
var_list_orig = var_list.copy() # Keep track of the original list before changes.
list_calc_sum = []
list_remove_extra = [] # list of variables to remove after the sum to save in memory.
for var_sum in [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]:
if var_sum in var_list:
if var_sum == "PM25":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
elif var_sum == "PM10":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
else:
var_list.extend(dict_sum[var_sum])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum[var_sum])
var_list.remove(var_sum)
list_calc_sum.append(var_sum)
# append the other needed species.
var_list.append("lat")
var_list.append("lon")
var_list.append("phalf")
var_list.append("tmp")
var_list.append("pressfc")
var_list.append("dpres")
var_list.append("hgtsfc")
var_list.append("delz")
# Remove duplicates just in case:
var_list = list(dict.fromkeys(var_list))
list_remove_extra = list(dict.fromkeys(list_remove_extra))
# Select only those elements in list_remove_extra that are not in var_list_orig
list_remove_extra_only = list(set(list_remove_extra) - set(var_list_orig))
# If variables in pm25 files are included remove these as these are not in the main file
# And will be added later.
for pm25_var in [
"PM25_TOT",
"PM25_TOT_NSOM",
"PM25_EC",
"PM25_NH4",
"PM25_NO3",
"PM25_SO4",
"PM25_OC",
"PM25_OM",
]:
if pm25_var in var_list:
var_list.remove(pm25_var)
# open the dataset using xarray
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)[var_list]
else:
# Read in all variables and do all calculations.
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)
list_calc_sum = [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]
if fname_pm25 is not None:
# Add the processed pm2.5 species.
dset_pm25 = xr.open_mfdataset(fname_pm25, concat_dim="time", combine="nested", **kwargs)
dset_pm25 = dset_pm25.drop(
labels=["lat", "lon", "pfull"]
) # Drop duplicate variables so can merge.
# Slight differences in pfull value between the files, but I assume that these still represent the
# same pressure levels from the model dynf* files.
# Attributes are formatted differently in pm25 file so remove attributes and use those from dynf* files.
dset_pm25.attrs = {}
dset = dset.merge(dset_pm25)
# Standardize some variable names
dset = dset.rename(
{
"grid_yt": "y",
"grid_xt": "x",
"pfull": "z",
"phalf": "z_i", # Interface pressure levels
"lon": "longitude",
"lat": "latitude",
"tmp": "temperature_k", # standard temperature (kelvin)
"pressfc": "surfpres_pa",
"dpres": "dp_pa", # Change names so standard surfpres_pa and dp_pa
"hgtsfc": "surfalt_m",
"delz": "dz_m",
}
) # Optional, but when available include altitude info
# Calculate pressure. This has to go before sorting because ak and bk
# are not sorted as they are in attributes
dset["pres_pa_mid"] = _calc_pressure(dset)
# Adjust pressure levels for all models such that the surface is first.
dset = dset.sortby("z", ascending=False)
dset = dset.sortby("z_i", ascending=False)
# Note this altitude calcs needs to always go after resorting.
# Altitude calculations are all optional, but for each model add values that are easy to calculate.
dset["alt_msl_m_full"] = _calc_hgt(dset)
dset["dz_m"] = dset["dz_m"] * -1.0 # Change to positive values.
# Set coordinates
dset = dset.reset_index(
["x", "y", "z", "z_i"], drop=True
) # For now drop z_i no variables use it.
dset["latitude"] = dset["latitude"].isel(time=0)
dset["longitude"] = dset["longitude"].isel(time=0)
dset = dset.reset_coords()
dset = dset.set_coords(["latitude", "longitude"])
# These sums and units are quite expensive and memory intensive,
# so add option to shrink dataset to just surface when needed
if surf_only:
dset = dset.isel(z=0).expand_dims("z", axis=1)
# Need to adjust units before summing for aerosols
# convert all gas species to ppbv
if convert_to_ppb:
for i in dset.variables:
if "units" in dset[i].attrs:
if "ppmv" in dset[i].attrs["units"]:
dset[i] *= 1000.0
dset[i].attrs["units"] = "ppbv"
# convert "ug/kg to ug/m3"
for i in dset.variables:
if "units" in dset[i].attrs:
if "ug/kg" in dset[i].attrs["units"]:
# ug/kg -> ug/m3 using dry air density
dset[i] = dset[i] * dset["pres_pa_mid"] / dset["temperature_k"] / 287.05535
dset[i].attrs["units"] = r"$\mu g m^{-3}$"
# add lazy diagnostic variables
    # Note: because there are so many species to sum, summing the aerosols slows down the code.
if "PM25" in list_calc_sum:
dset = add_lazy_pm25(dset, dict_sum)
if "PM10" in list_calc_sum:
dset = add_lazy_pm10(dset, dict_sum)
if "noy_gas" in list_calc_sum:
dset = add_lazy_noy_g(dset, dict_sum)
if "noy_aer" in list_calc_sum:
dset = add_lazy_noy_a(dset, dict_sum)
if "nox" in list_calc_sum:
dset = add_lazy_nox(dset, dict_sum)
if "pm25_cl" in list_calc_sum:
dset = add_lazy_cl_pm25(dset, dict_sum)
if "pm25_ec" in list_calc_sum:
dset = add_lazy_ec_pm25(dset, dict_sum)
if "pm25_ca" in list_calc_sum:
dset = add_lazy_ca_pm25(dset, dict_sum)
if "pm25_na" in list_calc_sum:
dset = add_lazy_na_pm25(dset, dict_sum)
if "pm25_nh4" in list_calc_sum:
dset = add_lazy_nh4_pm25(dset, dict_sum)
if "pm25_no3" in list_calc_sum:
dset = add_lazy_no3_pm25(dset, dict_sum)
if "pm25_so4" in list_calc_sum:
dset = add_lazy_so4_pm25(dset, dict_sum)
if "pm25_om" in list_calc_sum:
dset = add_lazy_om_pm25(dset, dict_sum)
# Change the times to pandas format
dset["time"] = dset.indexes["time"].to_datetimeindex(unsafe=True)
# Turn off warning for now. This is just because the model is in julian time
# Drop extra variables that were part of sum, but are not in original var_list
# to save memory and computational time.
    # This is only relevant if var_list is provided
if var_list is not None:
if bool(list_remove_extra_only): # confirm list not empty
dset = dset.drop_vars(list_remove_extra_only)
return dset
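# Example call (hypothetical paths and values), reading only ozone and the
# PM2.5 sum at the surface:
#
#     dset = open_mfdataset("dynf*.nc", var_list=["o3", "PM25"], surf_only=True)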
def _get_keys(d):
"""Calculates keys
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
list
list of keys
"""
keys = Series([i for i in d.data_vars.keys()])
return keys
def add_lazy_pm25(d, dict_sum):
"""Calculates PM2.5 sum. 20% of coarse mode is included in PM2.5 sum.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new PM2.5 calculation
"""
keys = _get_keys(d)
allvars = Series(
concatenate([dict_sum["aitken"], dict_sum["accumulation"], dict_sum["coarse"]])
)
weights = Series(
concatenate(
[
np.ones(len(dict_sum["aitken"])),
np.ones(len(dict_sum["accumulation"])),
np.full(len(dict_sum["coarse"]), 0.2),
]
)
)
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
newweights = weights.loc[index]
d["PM25"] = add_multiple_lazy2(d, newkeys, weights=newweights)
d["PM25"] = d["PM25"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "PM2.5",
"long_name": "PM2.5 calculated by MONET assuming coarse mode 20%",
}
)
return d
def add_lazy_pm10(d, dict_sum):
"""Calculates PM10 sum.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new PM10 calculation
"""
keys = _get_keys(d)
allvars = Series(
concatenate([dict_sum["aitken"], dict_sum["accumulation"], dict_sum["coarse"]])
)
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["PM10"] = add_multiple_lazy2(d, newkeys)
d["PM10"] = d["PM10"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "PM10",
"long_name": "Particulate Matter < 10 microns",
}
)
return d
def add_lazy_noy_g(d, dict_sum):
"""Calculates NOy gas
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOy gas calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["noy_gas"])
weights = Series(dict_sum["noy_gas_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
newweights = weights.loc[index]
d["noy_gas"] = add_multiple_lazy2(d, newkeys, weights=newweights)
d["noy_gas"] = d["noy_gas"].assign_attrs({"name": "noy_gas", "long_name": "NOy gases"})
return d
def add_lazy_noy_a(d, dict_sum):
"""Calculates NOy aerosol
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOy aerosol calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["noy_aer"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["noy_aer"] = add_multiple_lazy2(d, newkeys)
d["noy_aer"] = d["noy_aer"].assign_attrs(
{"units": r"$\mu g m^{-3}$", "name": "noy_aer", "long_name": "NOy aerosol"}
)
return d
def add_lazy_nox(d, dict_sum):
"""Calculates NOx
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOx calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["nox"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["nox"] = add_multiple_lazy2(d, newkeys)
d["nox"] = d["nox"].assign_attrs({"name": "nox", "long_name": "nox"})
return d
def add_lazy_cl_pm25(d, dict_sum):
"""Calculates sum of particulate Cl.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new CLf calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_cl"])
weights = Series(dict_sum["pm25_cl_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_cl"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_cl"] = d["pm25_cl"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_cl",
"long_name": "PM2.5 CL assuming coarse mode 20%",
}
)
return d
def add_lazy_ec_pm25(d, dict_sum):
"""Calculates sum of particulate EC.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new EC calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_ec"])
weights = Series(dict_sum["pm25_ec_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_ec"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_ec"] = d["pm25_ec"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_ec",
"long_name": "PM2.5 EC assuming coarse mode 20%",
}
)
return d
def add_lazy_ca_pm25(d, dict_sum):
"""Calculates sum of particulate CA.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new CA calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_ca"])
weights = Series(dict_sum["pm25_ca_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_ca"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_ca"] = d["pm25_ca"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_ca",
"long_name": "PM2.5 CA assuming coarse mode 20%",
}
)
return d
def add_lazy_na_pm25(d, dict_sum):
"""Calculates sum of particulate NA.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NA calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_na"])
weights = Series(dict_sum["pm25_na_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_na"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_na"] = d["pm25_na"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_na",
"long_name": "PM2.5 NA assuming coarse mode 20%",
}
)
return d
def add_lazy_nh4_pm25(d, dict_sum):
"""Calculates sum of particulate NH4.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NH4 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_nh4"])
weights = Series(dict_sum["pm25_nh4_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_nh4"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_nh4"] = d["pm25_nh4"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_nh4",
"long_name": "PM2.5 NH4 assuming coarse mode 20%",
}
)
return d
def add_lazy_no3_pm25(d, dict_sum):
"""Calculates sum of particulate NO3.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NO3 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_no3"])
weights = Series(dict_sum["pm25_no3_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_no3"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_no3"] = d["pm25_no3"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_no3",
"long_name": "PM2.5 NO3 assuming coarse mode 20%",
}
)
return d
def add_lazy_so4_pm25(d, dict_sum):
"""Calculates sum of particulate SO4.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new SO4 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_so4"])
weights = Series(dict_sum["pm25_so4_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_so4"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_so4"] = d["pm25_so4"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_so4",
"long_name": "PM2.5 SO4 assuming coarse mode 20%",
}
)
return d
def add_lazy_om_pm25(d, dict_sum):
"""Calculates sum of particulate OM.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new OM calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_om"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["pm25_om"] = add_multiple_lazy2(d, newkeys)
d["pm25_om"] = d["pm25_om"].assign_attrs(
{"units": r"$\mu g m^{-3}$", "name": "pm25_om", "long_name": "PM2.5 OM"}
)
return d
def add_multiple_lazy(dset, variables, weights=None):
"""Sums variables
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
variables : series
series of variables
    weights : series
series of weights to apply to each variable during the sum
Returns
-------
xarray.Dataarray
Weighted sum of all specified variables
"""
from numpy import ones
if weights is None:
weights = ones(len(variables))
else:
weights = weights.values
variables = variables.values
new = dset[variables[0]].copy() * weights[0]
for i, j in zip(variables[1:], weights[1:]):
new = new + dset[i] * j
return new
def add_multiple_lazy2(dset, variables, weights=None):
"""Sums variables. This is similar to add_multiple_lazy, but is a little
faster.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
variables : series
series of variables
    weights : series
series of weights to apply to each variable during the sum
Returns
-------
xarray.Dataarray
Weighted sum of all specified variables
"""
dset2 = dset[variables.values]
if weights is not None:
for i, j in zip(variables.values, weights.values):
dset2[i] = dset2[i] * j
new = dset2.to_array().sum("variable")
return new
def _predefined_mapping_tables(dset):
"""Predefined mapping tables for different observational parings used when
combining data.
Returns
-------
dictionary
dictionary defining default mapping tables
"""
to_improve = {}
to_nadp = {}
to_aqs = {
"OZONE": ["o3"],
"PM2.5": ["PM25"],
"CO": ["co"],
"NOY": ["NOy"],
"NOX": ["NOx"],
"SO2": ["so2"],
"NO": ["no"],
"NO2": ["no2"],
}
to_airnow = {
"OZONE": ["o3"],
"PM2.5": ["PM25"],
"CO": ["co"],
"NOY": ["NOy"],
"NOX": ["NOx"],
"SO2": ["so2"],
"NO": ["no"],
"NO2": ["no2"],
}
to_crn = {}
to_aeronet = {}
to_cems = {}
mapping_tables = {
"improve": to_improve,
"aqs": to_aqs,
"airnow": to_airnow,
"crn": to_crn,
"cems": to_cems,
"nadp": to_nadp,
"aeronet": to_aeronet,
}
dset = dset.assign_attrs({"mapping_tables": mapping_tables})
return dset
# For the different mechanisms, just update these arrays as needed.
def dict_species_sums(mech):
"""Predefined mapping tables for different observational parings used when
combining data.
Parameters
----------
mech : string
mechanism name
Returns
-------
dictionary
dictionary defining the variables to sum based on the specified mechanism
"""
if mech == "cb6r3_ae6_aq":
sum_dict = {}
        # Arrays for different gases and PM groupings
sum_dict.update(
{
"accumulation": [
"aso4j",
"ano3j",
"anh4j",
"anaj",
"aclj",
"aecj",
"aothrj",
"afej",
"asij",
"atij",
"acaj",
"amgj",
"amnj",
"aalj",
"akj",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
"apcsoj",
]
}
)
sum_dict.update(
{
"accumulation_wopc": [
"aso4j",
"ano3j",
"anh4j",
"anaj",
"aclj",
"aecj",
"aothrj",
"afej",
"asij",
"atij",
"acaj",
"amgj",
"amnj",
"aalj",
"akj",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
]
}
)
sum_dict.update(
{
"aitken": [
"aso4i",
"ano3i",
"anh4i",
"anai",
"acli",
"aeci",
"aothri",
"alvpo1i",
"asvpo1i",
"asvpo2i",
"alvoo1i",
"alvoo2i",
"asvoo1i",
"asvoo2i",
]
}
)
sum_dict.update(
{"coarse": ["asoil", "acors", "aseacat", "aclk", "aso4k", "ano3k", "anh4k"]}
)
sum_dict.update(
{
"noy_gas": [
"no",
"no2",
"no3",
"n2o5",
"hono",
"hno3",
"pna",
"cron",
"clno2",
"pan",
"panx",
"opan",
"ntr1",
"ntr2",
"intr",
]
}
)
sum_dict.update({"noy_gas_weight": [1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]})
sum_dict.update(
{"noy_aer": ["ano3i", "ano3j", "ano3k"]}
) # Need to confirm here if there is a size cutoff for noy obs?
sum_dict.update({"nox": ["no", "no2"]})
sum_dict.update({"pm25_cl": ["acli", "aclj", "aclk"]})
sum_dict.update({"pm25_cl_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_ec": ["aeci", "aecj"]})
sum_dict.update({"pm25_ec_weight": [1, 1]})
sum_dict.update({"pm25_na": ["anai", "anaj", "aseacat", "asoil", "acors"]})
sum_dict.update({"pm25_na_weight": [1, 1, 0.2 * 0.8373, 0.2 * 0.0626, 0.2 * 0.0023]})
sum_dict.update({"pm25_ca": ["acaj", "aseacat", "asoil", "acors"]})
sum_dict.update({"pm25_ca_weight": [1, 0.2 * 0.0320, 0.2 * 0.0838, 0.2 * 0.0562]})
sum_dict.update({"pm25_nh4": ["anh4i", "anh4j", "anh4k"]})
sum_dict.update({"pm25_nh4_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_no3": ["ano3i", "ano3j", "ano3k"]})
sum_dict.update({"pm25_no3_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_so4": ["aso4i", "aso4j", "aso4k"]})
sum_dict.update({"pm25_so4_weight": [1, 1, 0.2]})
sum_dict.update(
{
"pm25_om": [
"alvpo1i",
"asvpo1i",
"asvpo2i",
"alvoo1i",
"alvoo2i",
"asvoo1i",
"asvoo2i",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
"apcsoj",
]
}
)
else:
raise NotImplementedError(
"Mechanism not supported, update _rrfs_cmaq_mm.py file in MONETIO"
)
return sum_dict
def _calc_hgt(f):
"""Calculates the geopotential height in m from the variables hgtsfc and
delz. Note: To use this function the delz value needs to go from surface
to top of atmosphere in vertical. Because we are adding the height of
each grid box these are really grid top values
Parameters
----------
f : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xr.DataArray
        Geopotential height with attributes.
"""
sfc = f.surfalt_m.load()
dz = f.dz_m.load() * -1.0
# These are negative in RRFS-CMAQ, but you resorted and are adding from the surface,
# so make them positive.
dz[:, 0, :, :] = dz[:, 0, :, :] + sfc # Add the surface altitude to the first model level only
z = dz.rolling(z=len(f.z), min_periods=1).sum()
z.name = "alt_msl_m_full"
z.attrs["long_name"] = "Altitude MSL Full Layer in Meters"
z.attrs["units"] = "m"
return z
def _calc_pressure(dset):
"""Calculate the mid-layer pressure in Pa from surface pressure
and ak and bk constants.
Interface pressures are calculated by:
phalf(k) = a(k) + surfpres * b(k)
Mid layer pressures are calculated by:
pfull(k) = (phalf(k+1)-phalf(k))/log(phalf(k+1)/phalf(k))
Parameters
----------
dset : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.DataArray
Mid-layer pressure with attributes.
"""
pres = dset.dp_pa.copy().load() # Have to load into memory here so can assign levels.
srfpres = dset.surfpres_pa.copy().load()
for k in range(len(dset.z)):
pres_2 = dset.ak[k + 1] + srfpres * dset.bk[k + 1]
pres_1 = dset.ak[k] + srfpres * dset.bk[k]
pres[:, k, :, :] = (pres_2 - pres_1) / np.log(pres_2 / pres_1)
pres.name = "pres_pa_mid"
pres.attrs["units"] = "pa"
pres.attrs["long_name"] = "Pressure Mid Layer in Pa"
return pres
| 29.569356 | 112 | 0.508079 |
5fc818c5836435c92ae4ef2d17b3e1e01d7c0fde | 816 | bzl | Python
max_stars: build/build.bzl | abaer123/gitlab-agent | 71c94d781ae2a7ae2851bb946c37fe01b1ed3da0 | ["MIT"] | null | null | null
max_issues: build/build.bzl | abaer123/gitlab-agent | 71c94d781ae2a7ae2851bb946c37fe01b1ed3da0 | ["MIT"] | null | null | null
max_forks: build/build.bzl | abaer123/gitlab-agent | 71c94d781ae2a7ae2851bb946c37fe01b1ed3da0 | ["MIT"] | null | null | null
load("@com_github_atlassian_bazel_tools//multirun:def.bzl", "command")
load("@bazel_skylib//lib:shell.bzl", "shell")
# This macro expects the target directory for the file as an additional command line argument.
| 37.090909 | 99 | 0.658088 |
5fc9836cfddecb88f1956951f281f1c8d40b8f81 | 4,471 | py | Python
max_stars: CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 7 | 2016-05-20T21:56:39.000Z | 2022-02-07T21:09:48.000Z
max_issues: CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 1 | 2019-03-21T16:10:04.000Z | 2019-03-22T17:21:56.000Z
max_forks: CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 1 | 2020-05-19T16:17:17.000Z | 2020-05-19T16:17:17.000Z
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.catalog.catalog Contains the GalacticCatalog and StellarCatalog classes.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ..tools import catalogs
from ...core.tools import introspection, tables
from ...core.tools import filesystem as fs
# -----------------------------------------------------------------
catalogs_user_path = fs.join(introspection.pts_user_dir, "catalogs")
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
| 31.485915 | 97 | 0.547976 |
5fcaa9f085f2d78ed188a66c5c69d0728b2a6373 | 2,640 | py | Python
max_stars: tools/common.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | ["MIT"] | 5 | 2021-06-09T02:11:19.000Z | 2021-10-04T09:00:31.000Z
max_issues: tools/common.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | ["MIT"] | null | null | null
max_forks: tools/common.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | ["MIT"] | null | null | null
from dataclasses import dataclass
from typing import Iterable
import torch
from torchmetrics import ConfusionMatrix
from collections import defaultdict
argmax = lambda l: l.index(max(l))
BIRAD_MAP = ['2', '3', '4', '5']
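# NOTE: _lbm is defined elsewhere in the original module (this excerpt is
# truncated). A hypothetical no-op stand-in so the excerpt executes:
def _lbm():
    pass  # placeholder for the original label-map initializer (assumption)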
_lbm()
| 24.220183 | 117 | 0.591667 |
5fcb3be04540c3af2931e387575e6b75d7da7f7e | 34,361 | py | Python
max_stars: quantlib/backends/twn_accelerator/grrules/dporules.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | ["Apache-2.0"] | null | null | null
max_issues: quantlib/backends/twn_accelerator/grrules/dporules.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | ["Apache-2.0"] | null | null | null
max_forks: quantlib/backends/twn_accelerator/grrules/dporules.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | ["Apache-2.0"] | 1 | 2022-01-02T10:10:46.000Z | 2022-01-02T10:10:46.000Z
#
# dporules.py
#
# Author(s):
# Matteo Spallanzani <[email protected]>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import networkx as nx
from collections import OrderedDict
import itertools
import math
import torch
import torch.nn as nn
import quantlib.editing.graphs as qg
from quantlib.editing.graphs.grrules.dporules import DPORule
from quantlib.editing.graphs.grrules import Seeker
from quantlib.editing.graphs.graphs.nodes import Bipartite, __NODE_ID_FORMAT__, PyTorchNode
import quantlib.algorithms as qa
from .folding import foldsteinqconvbnste, foldconvbnste, foldsteinqconvbn
__all__ = [
'FoldSTEINQConvBNSTETypeARule',
'FoldSTEINQConvBNSTETypeBRule',
'FoldConvBNSTERule',
'FoldSTEINQConvBNRule',
]
| 47.723611 | 143 | 0.619831 |
5fcc22d5ecaf0da083c5ac9d8ac997e97cc93417 | 5,896 | py | Python
max_stars: news_api/endpoints/models.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | ["MIT"] | 9 | 2019-07-19T13:19:55.000Z | 2021-07-08T16:25:30.000Z
max_issues: news_api/endpoints/models.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | ["MIT"] | null | null | null
max_forks: news_api/endpoints/models.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | ["MIT"] | 1 | 2021-05-12T01:50:04.000Z | 2021-05-12T01:50:04.000Z
# -*- coding: utf-8 -*-
# System imports
import json
# Third-party imports
import falcon
from news_api.endpoints.vespaSearcher import vespaSearch
from news_api.endpoints.top_entities import getTopNewEntities
from news_api.endpoints.top_clusters import getTopNewCluster
# Local imports
# from news_api import settings
| 35.518072 | 111 | 0.518318 |
5fcda78cf21f154d5256341e1d4f6994551d5ce9 | 858 | py | Python
max_stars: exercicio9.py | isaacfelipe1/Estrutura_De_Dados_Um_UEA | 79b693d186154b54b7bb0c2dac10cd4cf9886bb3 | ["Apache-2.0"] | null | null | null
max_issues: exercicio9.py | isaacfelipe1/Estrutura_De_Dados_Um_UEA | 79b693d186154b54b7bb0c2dac10cd4cf9886bb3 | ["Apache-2.0"] | null | null | null
max_forks: exercicio9.py | isaacfelipe1/Estrutura_De_Dados_Um_UEA | 79b693d186154b54b7bb0c2dac10cd4cf9886bb3 | ["Apache-2.0"] | null | null | null
# 9 - Write a program that reads an indeterminate number of grades. After this data entry, do the following:
# . Show how many grades were read.
# . Display all the grades in the order they were entered.
# . Compute and show the average of the grades.
# . Compute and show how many grades are above the computed average.
list = []
acima_media = []
notas = float(input("Enter your grades (-1 to quit)\n"))
while notas >= 0:
    list.append(notas)
    notas = float(input("Enter your grades (-1 to quit)\n"))
media = sum(list) / len(list)
for i, word in enumerate(list):
    if word > media:
        acima_media += [word]
    print('at position', i, 'the number entered was', word)
soma = len(acima_media)
print(f'Number of grades entered: {len(list)}')
print()
print('=>' * 30)
print(f'The average of the grades was {media}')
print(f'Grades above the average: {soma}')
print(acima_media)
| 35.75 | 105 | 0.708625 |
5fcddc4097a230efd88262807f43401aaaeff2ab | 257 | py | Python
max_stars: p5.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | ["MIT"] | null | null | null
max_issues: p5.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | ["MIT"] | null | null | null
max_forks: p5.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | ["MIT"] | null | null | null
from math import gcd  # fractions.gcd was removed in Python 3.9
from functools import reduce

def lcm(x, y):
    return x * y // gcd(x, y)  # least common multiple via the gcd identity

def smallestDiv():
    """Finds smallest number that is evenly divisible from 1 through 20"""
    return reduce(lambda x, y: lcm(x, y), range(1, 21))

if __name__ == '__main__':
    print(smallestDiv())
| 21.416667 | 71 | 0.692607 |
5fcf633d461876ef2ed0512751ad534119c618aa | 1,249 | py | Python
max_stars: src/resnet_datasize_plot.py | chloechsu/nanoparticle | 5e78fe33c2d562aa31d5e458be0dbf52813f20b1 | ["MIT"] | 1 | 2021-04-04T23:07:59.000Z | 2021-04-04T23:07:59.000Z
max_issues: src/resnet_datasize_plot.py | chloechsu/nanoparticle | 5e78fe33c2d562aa31d5e458be0dbf52813f20b1 | ["MIT"] | null | null | null
max_forks: src/resnet_datasize_plot.py | chloechsu/nanoparticle | 5e78fe33c2d562aa31d5e458be0dbf52813f20b1 | ["MIT"] | 3 | 2021-01-13T14:50:42.000Z | 2022-03-20T16:19:52.000Z
import argparse
import csv
import glob
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
shapes = ['TriangPrismIsosc', 'parallelepiped', 'sphere', 'wire']
if __name__ == "__main__":
main()
| 29.046512 | 78 | 0.622898 |
5fd0efe4c22b97942030348d8ad7858091215264 | 1,482 | py | Python
max_stars: pyramid_bootstrap/__init__.py | keitheis/pyramid_bootstrap | e8d6e8b9081427bca264d16a679571c35d3527e5 | ["BSD-3-Clause"] | null | null | null
max_issues: pyramid_bootstrap/__init__.py | keitheis/pyramid_bootstrap | e8d6e8b9081427bca264d16a679571c35d3527e5 | ["BSD-3-Clause"] | null | null | null
max_forks: pyramid_bootstrap/__init__.py | keitheis/pyramid_bootstrap | e8d6e8b9081427bca264d16a679571c35d3527e5 | ["BSD-3-Clause"] | 1 | 2018-04-12T14:27:52.000Z | 2018-04-12T14:27:52.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Keith Yang'
__email__ = '[email protected]'
__version__ = '0.1.0'
from pyramid.settings import asbool
from .bootstrap import BootstrapFactory
| 30.244898 | 79 | 0.625506 |
5fd224ae58a35451a109abe33921bfe534a36c4b | 3,043 | py | Python
max_stars: Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | ["MIT"] | 2 | 2020-05-28T07:15:00.000Z | 2020-07-21T08:34:06.000Z
max_issues: Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | ["MIT"] | null | null | null
max_forks: Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | ["MIT"] | null | null | null
#!/bin/python3
import math
import os
import random
import re
import sys
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the mergeLists function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
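# NOTE: the node/list classes and mergeLists itself are elided in this
# excerpt (HackerRank supplies them in the full stub). A hypothetical
# minimal version so the snippet runs:
class SinglyLinkedListNode:
    def __init__(self, data):
        self.data = data
        self.next = None

class SinglyLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def insert_node(self, data):
        # Append a new node at the tail, keeping O(1) insertion.
        node = SinglyLinkedListNode(data)
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node

def mergeLists(head1, head2):
    # Standard two-pointer merge of two sorted singly linked lists.
    dummy = SinglyLinkedListNode(0)
    tail = dummy
    while head1 and head2:
        if head1.data <= head2.data:
            tail.next, head1 = head1, head1.next
        else:
            tail.next, head2 = head2, head2.next
        tail = tail.next
    tail.next = head1 if head1 else head2
    return dummy.next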
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist1_count = int(input())
llist1 = SinglyLinkedList()
for _ in range(llist1_count):
llist1_item = int(input())
llist1.insert_node(llist1_item)
llist2_count = int(input())
llist2 = SinglyLinkedList()
for _ in range(llist2_count):
llist2_item = int(input())
llist2.insert_node(llist2_item)
llist3 = mergeLists(llist1.head, llist2.head)
print_singly_linked_list(llist3, ' ', fptr)
fptr.write('\n')
fptr.close()
| 21.58156 | 72 | 0.612882 |
39563b416a76edc246cc669718217ec4a6dc8d69 | 199 | py | Python
max_stars: tools/stress_test.py | chouette254/quo | 8979afd118e77d3d0f93f9fbe8711efada7158c5 | ["MIT"] | 5 | 2021-06-17T21:06:39.000Z | 2022-03-11T06:45:51.000Z
max_issues: tools/stress_test.py | chouette254/quo | 8979afd118e77d3d0f93f9fbe8711efada7158c5 | ["MIT"] | 39 | 2021-07-19T19:36:18.000Z | 2022-02-23T14:55:08.000Z
max_forks: tools/stress_test.py | secretuminc/quo | c4f77d52f015c612d32ed0fc2fc79545af598f10 | ["MIT"] | 1 | 2021-05-31T17:19:15.000Z | 2021-05-31T17:19:15.000Z
from quo import Console
from quo.pretty import Pretty
from quo.panel import Panel
DATA = "My name is Quo"
console = Console()
for w in range(130):
console.echo(Panel(Pretty(DATA), width=w))
| 15.307692 | 46 | 0.718593 |
3957f752a49e9fed33ab81dcc197e7f08498b9c3 | 4,856 | py | Python
max_stars: wysihtml5/conf/defaults.py | vkuryachenko/django-wysihtml5 | 5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0 | ["BSD-2-Clause"] | 4 | 2015-03-24T20:41:31.000Z | 2021-05-24T15:41:16.000Z
max_issues: wysihtml5/conf/defaults.py | vkuryachenko/django-wysihtml5 | 5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0 | ["BSD-2-Clause"] | 1 | 2017-08-06T18:17:53.000Z | 2017-08-06T18:17:53.000Z
max_forks: wysihtml5/conf/defaults.py | vkuryachenko/django-wysihtml5 | 5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0 | ["BSD-2-Clause"] | 3 | 2015-05-14T15:06:21.000Z | 2021-05-24T15:43:05.000Z
#-*- coding: utf-8 -*-
from django.conf import settings
WYSIHTML5_EDITOR = {
# Give the editor a name, the name will also be set as class
# name on the iframe and on the iframe's body
'name': 'null',
# Whether the editor should look like the textarea (by adopting styles)
'style': 'true',
# Id of the toolbar element, pass false if you don't want
# any toolbar logic
'toolbar': 'null',
# Whether urls, entered by the user should automatically become
# clickable-links
'autoLink': 'true',
# Object which includes parser rules (set this to
# examples/rules/spec.json or your own spec, otherwise only span
# tags are allowed!)
'parserRules': 'wysihtml5ParserRules',
# Parser method to use when the user inserts content via copy & paste
'parser': 'wysihtml5.dom.parse || Prototype.K',
# Class name which should be set on the contentEditable element in
# the created sandbox iframe, can be styled via the 'stylesheets' option
'composerClassName': '"wysihtml5-editor"',
# Class name to add to the body when the wysihtml5 editor is supported
'bodyClassName': '"wysihtml5-supported"',
# By default wysihtml5 will insert <br> for line breaks, set this to
# false to use <p>
'useLineBreaks': 'true',
# Array (or single string) of stylesheet urls to be loaded in the
# editor's iframe
'stylesheets': '["%s"]' % (settings.STATIC_URL +
"wysihtml5/css/stylesheet.css"),
# Placeholder text to use, defaults to the placeholder attribute
# on the textarea element
'placeholderText': 'null',
# Whether the composer should allow the user to manually resize
# images, tables etc.
'allowObjectResizing': 'true',
# Whether the rich text editor should be rendered on touch devices
# (wysihtml5 >= 0.3.0 comes with basic support for iOS 5)
'supportTouchDevices': 'true'
}
WYSIHTML5_TOOLBAR = {
"formatBlockHeader": {
"active": True,
"command_name": "formatBlock",
"render_icon": "wysihtml5.widgets.render_formatBlockHeader_icon"
},
"formatBlockParagraph": {
"active": True,
"command_name": "formatBlock",
"render_icon": "wysihtml5.widgets.render_formatBlockParagraph_icon"
},
"bold": {
"active": True,
"command_name": "bold",
"render_icon": "wysihtml5.widgets.render_bold_icon"
},
"italic": {
"active": True,
"command_name": "italic",
"render_icon": "wysihtml5.widgets.render_italic_icon"
},
"underline": {
"active": True,
"command_name": "underline",
"render_icon": "wysihtml5.widgets.render_underline_icon"
},
"justifyLeft": {
"active": True,
"command_name": "justifyLeft",
"render_icon": "wysihtml5.widgets.render_justifyLeft_icon"
},
"justifyCenter": {
"active": True,
"command_name": "justifyCenter",
"render_icon": "wysihtml5.widgets.render_justifyCenter_icon"
},
"justifyRight": {
"active": True,
"command_name": "justifyRight",
"render_icon": "wysihtml5.widgets.render_justifyRight_icon"
},
"insertOrderedList": {
"active": True,
"command_name": "insertOrderedList",
"render_icon": "wysihtml5.widgets.render_insertOrderedList_icon"
},
"insertUnorderedList": {
"active": True,
"command_name": "insertUnorderedList",
"render_icon": "wysihtml5.widgets.render_insertUnorderedList_icon"
},
"insertImage": {
"active": True,
"command_name": "insertImage",
"render_icon": "wysihtml5.widgets.render_insertImage_icon",
"render_dialog": "wysihtml5.widgets.render_insertImage_dialog"
},
"createLink": {
"active": True,
"command_name": "createLink",
"render_icon": "wysihtml5.widgets.render_createLink_icon",
"render_dialog": "wysihtml5.widgets.render_createLink_dialog"
},
"insertHTML": {
"active": True,
"command_name": "insertHTML",
"command_value": "<blockquote>quote</blockquote>",
"render_icon": "wysihtml5.widgets.render_insertHTML_icon"
},
"foreColor": {
"active": True,
"command_name": "foreColor",
"render_icon": "wysihtml5.widgets.render_foreColor_icon"
},
"changeView": {
"active": True,
"command_name": "change_view",
"render_icon": "wysihtml5.widgets.render_changeView_icon"
},
}
# This is necessary to protect the field of content in cases where
# the user disables JavaScript in the browser, so that Wysihtml5 can't
# do the filter job.
WYSIHTML5_ALLOWED_TAGS = ('h1 h2 h3 h4 h5 h6 div p b i u'
' ul ol li span img a blockquote')
| 36.787879 | 76 | 0.635914 |
395a96908738ec18c9180da4437fee979a2a2992 | 6,496 | py | Python
max_stars: protocols/migration/migration_participant_100_to_reports_300.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | ["Apache-2.0"] | 14 | 2016-09-22T10:10:01.000Z | 2020-09-23T11:40:37.000Z
max_issues: protocols/migration/migration_participant_100_to_reports_300.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | ["Apache-2.0"] | 159 | 2016-09-22T11:08:46.000Z | 2021-09-29T13:55:52.000Z
max_forks: protocols/migration/migration_participant_100_to_reports_300.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | ["Apache-2.0"] | 17 | 2016-09-20T13:31:58.000Z | 2020-10-19T04:58:19.000Z
from protocols import reports_3_0_0 as participant_old
from protocols import participant_1_0_0
from protocols.migration import BaseMigration
| 56.982456 | 129 | 0.736761 |
395b088785153a0b12425d78d2c97981d28c0b99 | 584 | py | Python
max_stars: bluebottle/test/factory_models/pages.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | ["BSD-3-Clause"] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z
max_issues: bluebottle/test/factory_models/pages.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | ["BSD-3-Clause"] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z
max_forks: bluebottle/test/factory_models/pages.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | ["BSD-3-Clause"] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z
from builtins import object
from datetime import timedelta
import factory
from django.utils.timezone import now
from bluebottle.pages.models import Page
from .accounts import BlueBottleUserFactory
| 27.809524 | 66 | 0.741438 |
395bc11ce97e1bb26dff3ffa2dd8e88c133704f6 | 2,403 | py | Python
max_stars: ietf/ipr/migrations/0007_create_ipr_doc_events.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | ["BSD-3-Clause"] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z
max_issues: ietf/ipr/migrations/0007_create_ipr_doc_events.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | ["BSD-3-Clause"] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z
max_forks: ietf/ipr/migrations/0007_create_ipr_doc_events.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | ["BSD-3-Clause"] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z
# Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-17 12:32
from django.db import migrations
def create_or_delete_ipr_doc_events(apps, delete=False):
"""Create or delete DocEvents for IprEvents
Mostly duplicates IprEvent.create_doc_events(). This is necessary
because model methods, including custom save() methods, are not
available to migrations.
"""
IprEvent = apps.get_model('ipr', 'IprEvent')
DocEvent = apps.get_model('doc', 'DocEvent')
# Map from self.type_id to DocEvent.EVENT_TYPES for types that
# should be logged as DocEvents
event_type_map = {
'posted': 'posted_related_ipr',
'removed': 'removed_related_ipr',
}
for ipr_event in IprEvent.objects.filter(type_id__in=event_type_map):
related_docs = set() # related docs, no duplicates
for alias in ipr_event.disclosure.docs.all():
related_docs.update(alias.docs.all())
for doc in related_docs:
kwargs = dict(
type=event_type_map[ipr_event.type_id],
time=ipr_event.time,
by=ipr_event.by,
doc=doc,
rev='',
desc='%s related IPR disclosure: <b>%s</b>' % (ipr_event.type.name,
ipr_event.disclosure.title),
)
events = DocEvent.objects.filter(**kwargs) # get existing events
if delete:
events.delete()
elif len(events) == 0:
DocEvent.objects.create(**kwargs) # create if did not exist
def forward(apps, schema_editor):
"""Create a DocEvent for each 'posted' or 'removed' IprEvent"""
create_or_delete_ipr_doc_events(apps, delete=False)
def reverse(apps, schema_editor):
"""Delete DocEvents that would be created by the forward migration
This removes data, but only data that can be regenerated by running
the forward migration.
"""
create_or_delete_ipr_doc_events(apps, delete=True)
| 34.826087 | 91 | 0.62422 |
395f29ec9cf26aad90082c0bbf20534ee8f84d4b | 788 | py | Python
max_stars: getting_setting.py | madhurgupta96/Image-Fundamentals-with-OpenCV | 890fcce30155e98ab66e206c3511d77040570ec5 | ["Apache-2.0"] | null | null | null
max_issues: getting_setting.py | madhurgupta96/Image-Fundamentals-with-OpenCV | 890fcce30155e98ab66e206c3511d77040570ec5 | ["Apache-2.0"] | null | null | null
max_forks: getting_setting.py | madhurgupta96/Image-Fundamentals-with-OpenCV | 890fcce30155e98ab66e206c3511d77040570ec5 | ["Apache-2.0"] | null | null | null
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 23:52:04 2020
@author: Madhur Gupta
"""
from __future__ import print_function
import cv2
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image',required=True,help='path to image')
args=vars(ap.parse_args())
image=cv2.imread(args['image'])
cv2.imshow("Original", image)
# read the pixel at (0, 0), then set it to red
(b,g,r)=image[0,0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r,g, b))
image[0, 0] = (0, 0, 255)
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r,g, b))
# set the top-left corner of the image to green
corner=image[0:100,0:100]
cv2.imshow('corner',corner)
image[0:100,0:100]=(0,255,0)
cv2.imshow('Updated',image)
cv2.waitKey(0)
| 22.514286 | 71 | 0.619289 |
395f4cf60fb9e63158d7823964bdae4a063e3899 | 665 | py | Python
max_stars: zk_shell/tests/test_acl_reader.py | sellers/zk_shell | 5f5972c4362212f97de91a75e44d2a551c7bcd51 | ["Apache-2.0"] | 163 | 2015-01-24T06:17:34.000Z | 2021-12-17T22:58:46.000Z
max_issues: zk_shell/tests/test_acl_reader.py | sellers/zk_shell | 5f5972c4362212f97de91a75e44d2a551c7bcd51 | ["Apache-2.0"] | 86 | 2015-01-01T00:22:57.000Z | 2022-03-02T14:50:59.000Z
max_forks: zk_shell/tests/test_acl_reader.py | sellers/zk_shell | 5f5972c4362212f97de91a75e44d2a551c7bcd51 | ["Apache-2.0"] | 32 | 2015-02-18T17:33:16.000Z | 2021-12-28T03:43:45.000Z
# -*- coding: utf-8 -*-
""" ACLReader test cases """
import unittest
from kazoo.security import ACL, Id
from zk_shell.acl import ACLReader
| 28.913043 | 97 | 0.685714 |
395f821293e57d64e71d8ac788f63dcdb5e4e300 | 3,815 | py | Python
max_stars: dictator/validators/base.py | brunosmmm/dictator | 60314734b9d0c378fad77d296c8946165f372400 | ["MIT"] | null | null | null
max_issues: dictator/validators/base.py | brunosmmm/dictator | 60314734b9d0c378fad77d296c8946165f372400 | ["MIT"] | null | null | null
max_forks: dictator/validators/base.py | brunosmmm/dictator | 60314734b9d0c378fad77d296c8946165f372400 | ["MIT"] | null | null | null
"""Base validators."""
import re
from dictator.errors import ValidationError
from dictator.validators import Validator
from typing import Type, Callable, Any, Tuple, Union
HEX_REGEX = re.compile(r"^(0x)?([0-9A-Fa-f]+)$")
BIN_REGEX = re.compile(r"^(0b)?([0-1]+)$")
def _validate_integer(_value: Any, **kwargs: Any) -> int:
"""Validate integer value.
Parameters
----------
_value
Some value
kwargs
Other metadata
"""
if isinstance(_value, str):
# try converting
h = HEX_REGEX.match(_value)
b = BIN_REGEX.match(_value)
if h is not None:
if h.group(1) is None and b is not None:
# is actually binary
return int(h.group(2), 2)
return int(h.group(2), 16)
raise ValidationError("cannot validate as integer")
elif isinstance(_value, bool):
raise ValidationError("cannot validate as integer, got boolean")
elif isinstance(_value, int):
return _value
raise ValidationError("cannot validate as integer")
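# Example behaviour of _validate_integer (worked out from the regexes above;
# illustrative):
#   _validate_integer("0x1F") -> 31  (explicit hex prefix)
#   _validate_integer("101")  -> 5   (bare 0/1 digits parse as binary first)
#   _validate_integer("2F")   -> 47  (bare hex digits fall through to base 16)
#   _validate_integer(42)     -> 42  (ints pass through unchanged)
#   _validate_integer(True)   -> raises ValidationError (bools are rejected)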
validate_string = ValidatorFactory(ValidateType(str))
validate_list = ValidatorFactory(ValidateType(tuple, list))
validate_dict = ValidatorFactory(ValidateType(dict))
validate_boolean = ValidatorFactory(ValidateType(bool))
validate_float = ValidatorFactory(ValidateType(float))
validate_integer = ValidatorFactory(_validate_integer)
validate_string_pre = ValidatorFactory(ValidateType(str), after_fn=False)
validate_list_pre = ValidatorFactory(ValidateType(tuple, list), after_fn=False)
validate_dict_pre = ValidatorFactory(ValidateType(dict), after_fn=False)
validate_boolean_pre = ValidatorFactory(ValidateType(bool), after_fn=False)
validate_float_pre = ValidatorFactory(ValidateType(float), after_fn=False)
validate_integer_pre = ValidatorFactory(_validate_integer, after_fn=False)
def validate_null(_value: Any, **kwargs: Any) -> None:
"""Validate null value.
Parameters
    ----------
_value
Some value
kwargs
Other metadata
"""
if _value is not None:
raise ValidationError("value is not null")
return _value
DEFAULT_VALIDATOR_BY_TYPE = {
int: validate_integer,
str: validate_string,
list: validate_list,
dict: validate_dict,
bool: validate_boolean,
float: validate_float,
}
| 27.644928 | 79 | 0.654522 |
3960d947244ab5cacdb399b505a02597c36f0c4b | 554 | py | Python
max_stars: copasi_test/ReportParserMoieties.py | copasi/python-copasi-testsuite | 604ce52f95b4a0e2631712b22c331cd8c263bd05 | ["Artistic-2.0"] | null | null | null
max_issues: copasi_test/ReportParserMoieties.py | copasi/python-copasi-testsuite | 604ce52f95b4a0e2631712b22c331cd8c263bd05 | ["Artistic-2.0"] | null | null | null
max_forks: copasi_test/ReportParserMoieties.py | copasi/python-copasi-testsuite | 604ce52f95b4a0e2631712b22c331cd8c263bd05 | ["Artistic-2.0"] | null | null | null
from .ReportParser import ReportParser
| 29.157895 | 71 | 0.628159 |
396297e39e5a9bcc3e2b8459e2edf7a1785fe3e7 | 1,575 | py | Python
max_stars: models/networks/recurrent/encoder.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | ["Apache-2.0"] | null | null | null
max_issues: models/networks/recurrent/encoder.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | ["Apache-2.0"] | null | null | null
max_forks: models/networks/recurrent/encoder.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | ["Apache-2.0"] | null | null | null
import torch.nn as nn
import torch
"""
# use this one when not doing multi-task learning as a baseline
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, nlayers=2):
super(EncoderRNN, self).__init__()
self.nlayers = nlayers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, nlayers)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self, bsz):
        return torch.zeros(self.nlayers, bsz, self.hidden_size, device='cuda')  # 'gpu' is not a valid torch device string
"""
| 35 | 109 | 0.670476 |
396309f795615e199934ec29198bf8e06add077e | 1,087 | py | Python
max_stars: relationship_classifiction/test.py | suolyer/PyTorch_BERT_Pipeline_IE | 869a1fc937e268a565f5b30a2105a460b4e07f59 | ["MIT"] | 8 | 2021-05-23T02:04:09.000Z | 2022-01-14T08:58:42.000Z
max_issues: relationship_classifiction/test.py | 2019hong/PyTorch_BERT_Pipeline_IE | 9ee66bc9ceaed42e996e9b2414612de3fc0b23bb | ["MIT"] | 2 | 2021-05-14T00:34:45.000Z | 2021-08-08T08:36:33.000Z
max_forks: relationship_classifiction/test.py | 2019hong/PyTorch_BERT_Pipeline_IE | 9ee66bc9ceaed42e996e9b2414612de3fc0b23bb | ["MIT"] | 1 | 2021-09-28T15:15:44.000Z | 2021-09-28T15:15:44.000Z
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
import itertools
import matplotlib.pyplot as plt
initial_lr = 0.1
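# `model` is not defined in this excerpt (the original file is truncated);
# a hypothetical stand-in so the snippet runs:
class model(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(10, 2)  # arbitrary shapes, for illustration only

    def forward(self, x):
        return self.fc(x)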
net_1 = model()
optimizer_1 = torch.optim.Adam(net_1.parameters(), lr=initial_lr)
scheduler_1 = CosineAnnealingWarmRestarts(optimizer_1, T_0=1)
print("", optimizer_1.defaults['lr'])
lr_list = [] # lr
for epoch in range(0, 6):
# train
for i in range(int(30000/32)):
optimizer_1.zero_grad()
optimizer_1.step()
print("%depoch%f" % (epoch, optimizer_1.param_groups[0]['lr']))
lr_list.append(optimizer_1.param_groups[0]['lr'])
scheduler_1.step((epoch+i+1)/int(30000/32))
# lr
plt.plot(lr_list)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("learning rate's curve changes as epoch goes on!")
plt.show()
| 24.155556 | 83 | 0.689052 |
39637ce1898c8dbfd20a89d25579fc15ae6c2bcd
| 432 |
py
|
Python
|
events_calendar/urls.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
events_calendar/urls.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | 8 |
2021-04-08T21:57:55.000Z
|
2022-03-12T00:50:38.000Z
|
events_calendar/urls.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import events_calendar, calendar_event_detail, past_competitions
app_name = 'events_calendar'
urlpatterns = [
path('past_competitions/', past_competitions, name='past_competitions'),
path('<int:year>/<int:month>/<int:day>/<int:hour>/<slug:event>/',
calendar_event_detail, name='calendar_event_detail'),
path('<int:days>', events_calendar, name='events_calendar'),
]
| 30.857143 | 76 | 0.733796 |
39642b71284a9db7523df49c8dca22286f61d556
| 1,236 |
py
|
Python
|
examples/linear_regression/01_linear_regression.py
|
zhaoshiying97/trading_gym
|
d4af8d724efa17420e6ebb430f6f9d4f08c6f83a
|
[
"Apache-2.0"
] | 32 |
2019-12-06T19:23:51.000Z
|
2022-03-08T06:08:58.000Z
|
examples/linear_regression/01_linear_regression.py
|
zhaoshiying97/trading_gym
|
d4af8d724efa17420e6ebb430f6f9d4f08c6f83a
|
[
"Apache-2.0"
] | 2 |
2020-02-20T11:04:07.000Z
|
2020-03-12T08:47:54.000Z
|
examples/linear_regression/01_linear_regression.py
|
zhaoshiying97/trading_gym
|
d4af8d724efa17420e6ebb430f6f9d4f08c6f83a
|
[
"Apache-2.0"
] | 15 |
2019-12-12T07:43:34.000Z
|
2022-03-06T13:02:39.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pdb
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from trading_gym.utils.data.toy import create_toy_data
from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym
order_book_id_number = 100
toy_data = create_toy_data(order_book_ids_number=order_book_id_number, feature_number=10, start="2019-05-01", end="2019-12-12", frequency="D")
env = PortfolioTradingGym(data_df=toy_data, sequence_window=1, add_cash=False)
state = env.reset()
while True:
next_state, reward, done, info = env.step(action=None)
label = info["one_step_fwd_returns"]
print(state)
print(label)
    # fit a cross-sectional linear regression for this step
regressor = LinearRegression()
regressor.fit(state.values, label.values)
    # display and store the fitted coefficients
print(regressor.coef_)
env.experience_buffer["coef"].append(regressor.coef_)
    # advance to the next step
if done:
break
else:
state = next_state
# aggregate the per-step coefficients into factor returns
factor_returns = pd.DataFrame(np.array(env.experience_buffer["coef"]), index=env.experience_buffer["dt"], columns=toy_data.columns[:-1])
cum_factor_returns = (factor_returns +1).cumprod()
cum_factor_returns.plot(title="Cumulative Factor Return",linewidth=2.2)
| 30.9 | 142 | 0.741909 |
3965e8f70ee4cbba8c4a1ffa659f82e9962bbdcf
| 619 |
py
|
Python
|
migrations/versions/6f98e24760d_session_speaker.py
|
jace/goafunnel
|
5ff25f0e6a247ff1f6e87fce2a793d1775476cc0
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/versions/6f98e24760d_session_speaker.py
|
jace/goafunnel
|
5ff25f0e6a247ff1f6e87fce2a793d1775476cc0
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/versions/6f98e24760d_session_speaker.py
|
jace/goafunnel
|
5ff25f0e6a247ff1f6e87fce2a793d1775476cc0
|
[
"BSD-2-Clause"
] | null | null | null |
"""session speaker
Revision ID: 6f98e24760d
Revises: 58588eba8cb8
Create Date: 2013-11-22 17:28:47.751025
"""
# revision identifiers, used by Alembic.
revision = '6f98e24760d'
down_revision = '58588eba8cb8'
from alembic import op
import sqlalchemy as sa
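# The migration operations were dropped from this copy; the stubs below only
# restore the functions Alembic expects. Whatever schema change linked
# sessions to speakers in the original revision is not reproduced here.
def upgrade():
    pass
def downgrade():
    pass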
| 22.925926 | 89 | 0.6979 |
39671833a02d25c6d6b9a61a074e54f03e6112e8
| 1,124 |
py
|
Python
|
decision_tree/dt_author_id.py
|
ncfausti/udacity-machine-learning
|
223eb1821e739d048d278629a2e466b3f2af8912
|
[
"MIT"
] | null | null | null |
decision_tree/dt_author_id.py
|
ncfausti/udacity-machine-learning
|
223eb1821e739d048d278629a2e466b3f2af8912
|
[
"MIT"
] | null | null | null |
decision_tree/dt_author_id.py
|
ncfausti/udacity-machine-learning
|
223eb1821e739d048d278629a2e466b3f2af8912
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
this is the code to accompany the Lesson 3 (decision tree) mini-project
use an DT to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn import tree
from sklearn.metrics import accuracy_score
import time
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
clf = tree.DecisionTreeClassifier(min_samples_split = 40)
clf = clf.fit(features_train, labels_train)
prediction = clf.predict(features_test)
accuracy = accuracy_score(prediction, labels_test)
print("Accuracy: %.6f" % accuracy)
print("Feature length: %d" % len(features_train[0]))
#########################################################
### your code goes here ###
#########################################################
| 24.434783 | 75 | 0.674377 |
3968419bade051f1706f219d6c57e614a8cbfb88
| 49,588 |
py
|
Python
|
climateeconomics/tests/_l1_test_energy_global_values.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1 |
2022-01-14T06:37:42.000Z
|
2022-01-14T06:37:42.000Z
|
climateeconomics/tests/_l1_test_energy_global_values.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
climateeconomics/tests/_l1_test_energy_global_values.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
Copyright (C) 2020 Airbus SAS
'''
import unittest
import time
import numpy as np
import pandas as pd
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from climateeconomics.sos_processes.iam.witness.witness_dev.usecase_witness import Study as Study_open
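# The test-case body is missing from this copy; this skeleton only restores
# the names used in the __main__ block below so the module runs. The
# original assertions on net energy production are not available.
class TestGlobalEnergyValues(unittest.TestCase):
    def setUp(self):
        # assumed: a real setUp would build an ExecutionEngine around the
        # witness_dev Study imported above as Study_open
        self.study = Study_open()
    def test_03_check_net_production_values(self):
        self.assertIsNotNone(self.study)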
if '__main__' == __name__:
t0 = time.time()
cls = TestGlobalEnergyValues()
cls.setUp()
cls.test_03_check_net_production_values()
print(f'Time : {time.time() - t0} s')
| 47.680769 | 304 | 0.638239 |
396aa7d766efce4140f100be9476c86629b27ef9
| 11,383 |
py
|
Python
|
bmtk/simulator/bionet/modules/save_synapses.py
|
tjbanks/bmtk
|
52fee3b230ceb14a666c46f57f2031c38f1ac5b1
|
[
"BSD-3-Clause"
] | 1 |
2019-03-27T12:23:09.000Z
|
2019-03-27T12:23:09.000Z
|
bmtk/simulator/bionet/modules/save_synapses.py
|
tjbanks/bmtk
|
52fee3b230ceb14a666c46f57f2031c38f1ac5b1
|
[
"BSD-3-Clause"
] | null | null | null |
bmtk/simulator/bionet/modules/save_synapses.py
|
tjbanks/bmtk
|
52fee3b230ceb14a666c46f57f2031c38f1ac5b1
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import csv
import h5py
import numpy as np
from neuron import h
from .sim_module import SimulatorMod
from bmtk.simulator.bionet.biocell import BioCell
from bmtk.simulator.bionet.io_tools import io
from bmtk.simulator.bionet.pointprocesscell import PointProcessCell
pc = h.ParallelContext()
MPI_RANK = int(pc.id())
N_HOSTS = int(pc.nhost())
| 48.233051 | 139 | 0.598876 |
396be9b8e76a36fa6d51ae0f674f69f4c1dcf376
| 1,217 |
py
|
Python
|
pydouyu/packet_util.py
|
Kexiii/pydouyu
|
494732159980b7b71575e6757899c48052c6c2e0
|
[
"MIT"
] | 11 |
2019-02-22T01:02:32.000Z
|
2021-12-15T08:50:26.000Z
|
pydouyu/packet_util.py
|
Kexiii/pydouyu
|
494732159980b7b71575e6757899c48052c6c2e0
|
[
"MIT"
] | 2 |
2020-07-05T01:26:18.000Z
|
2021-01-07T15:22:57.000Z
|
pydouyu/packet_util.py
|
Kexiii/pydouyu
|
494732159980b7b71575e6757899c48052c6c2e0
|
[
"MIT"
] | 3 |
2019-04-23T01:22:20.000Z
|
2021-12-04T09:09:16.000Z
|
import time
client_msg_type = 689
reserved_data_field = 0
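# The framing helpers were stripped from this copy. The sketch below follows
# the commonly documented Douyu barrage protocol (two little-endian length
# fields, the client message type above, an encryption byte, a reserved
# byte, and a NUL-terminated body); treat it as an assumption rather than
# the original implementation.
def pack(body):
    payload = body.encode('utf-8') + b'\x00'
    length = 4 + 2 + 1 + 1 + len(payload)  # = len(body) + 9
    head = length.to_bytes(4, 'little')
    return (head + head
            + client_msg_type.to_bytes(2, 'little')
            + bytes([0, reserved_data_field])
            + payload)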
| 23.403846 | 66 | 0.632703 |
396d4f672042b6ba26b0ebbbfccf8610a433735a
| 2,976 |
py
|
Python
|
scripts/staging/sklearn/mappers/supervised.py
|
mgd-hin/systemds
|
08944a7305cbc4f4d9cbbd4565efa8bcc93b82e3
|
[
"Apache-2.0"
] | 372 |
2017-06-09T01:02:53.000Z
|
2020-06-24T05:45:00.000Z
|
scripts/staging/sklearn/mappers/supervised.py
|
ywcb00/systemds
|
5cc523971854cdf4f22e6199987a86e213fae4e2
|
[
"Apache-2.0"
] | 418 |
2017-06-08T16:27:44.000Z
|
2020-06-25T12:15:54.000Z
|
scripts/staging/sklearn/mappers/supervised.py
|
ywcb00/systemds
|
5cc523971854cdf4f22e6199987a86e213fae4e2
|
[
"Apache-2.0"
] | 190 |
2017-06-08T19:32:54.000Z
|
2020-06-15T12:26:12.000Z
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
from .mapper import Mapper
| 33.438202 | 80 | 0.576277 |
396e8a1e3e6aa7c66751f496564ba6b53523d4aa
| 43 |
py
|
Python
|
homemade_steganog/__init__.py
|
zoomie/homemade_steganog
|
1ab0a140b6a2e0d9d36073d067a2c808c97adf38
|
[
"MIT"
] | 1 |
2019-03-12T13:25:43.000Z
|
2019-03-12T13:25:43.000Z
|
homemade_steganog/__init__.py
|
zoomie/homemade_encryption
|
1ab0a140b6a2e0d9d36073d067a2c808c97adf38
|
[
"MIT"
] | 4 |
2020-03-24T16:43:01.000Z
|
2022-03-11T23:39:53.000Z
|
homemade_steganog/__init__.py
|
zoomie/homemade_encryption
|
1ab0a140b6a2e0d9d36073d067a2c808c97adf38
|
[
"MIT"
] | null | null | null |
from .home import Steg
__all__ = ['Steg',]
| 14.333333 | 22 | 0.674419 |
396fa59895ef035568d0b517a96fd649c4c2ec84
| 4,364 |
py
|
Python
|
xyw_macro/win32.py
|
xue0228/keyboard
|
dcb0def1d87a9197676c0f405b980a67e128ab24
|
[
"MIT"
] | null | null | null |
xyw_macro/win32.py
|
xue0228/keyboard
|
dcb0def1d87a9197676c0f405b980a67e128ab24
|
[
"MIT"
] | null | null | null |
xyw_macro/win32.py
|
xue0228/keyboard
|
dcb0def1d87a9197676c0f405b980a67e128ab24
|
[
"MIT"
] | null | null | null |
import ctypes
from ctypes import wintypes, windll
import win32api
import win32con
import win32gui
# PUL = ctypes.POINTER(ctypes.c_ulong)
PUL = ctypes.c_void_p
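# The ctypes structure definitions were lost in this copy; the versions
# below are reconstructed from the documented Win32 layouts (KBDLLHOOKSTRUCT,
# KEYBDINPUT, MOUSEINPUT, HARDWAREINPUT, INPUT) and from how the rest of
# this module uses them, so treat them as an informed assumption.
class KeyBdMsg(ctypes.Structure):
    """Mirrors KBDLLHOOKSTRUCT, delivered to low-level keyboard hooks."""
    _fields_ = [('vkCode', wintypes.DWORD),
                ('scanCode', wintypes.DWORD),
                ('flags', wintypes.DWORD),
                ('time', wintypes.DWORD),
                ('dwExtraInfo', PUL)]
class KeyBdInput(ctypes.Structure):
    """Mirrors KEYBDINPUT for SendInput."""
    KEYUP = 0x0002  # KEYEVENTF_KEYUP
    _fields_ = [('wVk', wintypes.WORD),
                ('wScan', wintypes.WORD),
                ('dwFlags', wintypes.DWORD),
                ('time', wintypes.DWORD),
                ('dwExtraInfo', PUL)]
class MouseInput(ctypes.Structure):
    _fields_ = [('dx', wintypes.LONG),
                ('dy', wintypes.LONG),
                ('mouseData', wintypes.DWORD),
                ('dwFlags', wintypes.DWORD),
                ('time', wintypes.DWORD),
                ('dwExtraInfo', PUL)]
class HardwareInput(ctypes.Structure):
    _fields_ = [('uMsg', wintypes.DWORD),
                ('wParamL', wintypes.WORD),
                ('wParamH', wintypes.WORD)]
class InputUnion(ctypes.Union):
    _fields_ = [('ki', KeyBdInput),
                ('mi', MouseInput),
                ('hi', HardwareInput)]
class Input(ctypes.Structure):
    """Mirrors INPUT; only the keyboard branch is used in this module."""
    KEYBOARD = 1  # INPUT_KEYBOARD
    _fields_ = [('type', wintypes.DWORD),
                ('union', InputUnion)]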
# prototype for the low-level keyboard hook callback
HookProc = ctypes.WINFUNCTYPE(
wintypes.LPARAM,
ctypes.c_int32, wintypes.WPARAM, ctypes.POINTER(KeyBdMsg))
# SendInput: inject synthetic input events
SendInput = windll.user32.SendInput
SendInput.argtypes = (
wintypes.UINT,
ctypes.POINTER(Input),
ctypes.c_int)
# GetMessage: pump the thread's message queue
GetMessage = windll.user32.GetMessageA
GetMessage.argtypes = (
wintypes.MSG,
wintypes.HWND,
wintypes.UINT,
wintypes.UINT)
# SetWindowsHookEx: install the hook
SetWindowsHookEx = windll.user32.SetWindowsHookExA
SetWindowsHookEx.argtypes = (
ctypes.c_int,
HookProc,
wintypes.HINSTANCE,
wintypes.DWORD)
# UnhookWindowsHookEx: remove the hook
UnhookWindowsHookEx = windll.user32.UnhookWindowsHookEx
UnhookWindowsHookEx.argtypes = (
wintypes.HHOOK,)
# CallNextHookEx: forward to the next hook in the chain
CallNextHookEx = windll.user32.CallNextHookEx
CallNextHookEx.argtypes = (
wintypes.HHOOK,
ctypes.c_int,
wintypes.WPARAM,
KeyBdMsg)
GetAsyncKeyState = windll.user32.GetAsyncKeyState
GetAsyncKeyState.argtypes = (
ctypes.c_int,
)
GetMessageExtraInfo = windll.user32.GetMessageExtraInfo
SetMessageExtraInfo = windll.user32.SetMessageExtraInfo
SetMessageExtraInfo.argtypes = (
wintypes.LPARAM,
)
def send_kb_event(v_key, is_pressed):
"""
    Send a key press or release with SendInput; the injected event's
    dwExtraInfo is tagged with 228 so a hook can recognise self-made input.
    :param v_key: virtual-key code to send
    :param is_pressed: True for key down, False for key up
    :return: number of input events successfully inserted
"""
extra = 228
li = InputUnion()
flag = KeyBdInput.KEYUP if not is_pressed else 0
li.ki = KeyBdInput(v_key, 0x48, flag, 0, extra)
input = Input(Input.KEYBOARD, li)
return SendInput(1, ctypes.pointer(input), ctypes.sizeof(input))
| 21.82 | 68 | 0.632676 |
397163cbc30071660c1df03a91c22f9cdffa46d3
| 496 |
py
|
Python
|
helpdesk/simple/views.py
|
fratoj/helpdesk
|
302c41491f26432bd65e468f015cdb123a47bcad
|
[
"MIT"
] | null | null | null |
helpdesk/simple/views.py
|
fratoj/helpdesk
|
302c41491f26432bd65e468f015cdb123a47bcad
|
[
"MIT"
] | 4 |
2021-04-08T21:51:21.000Z
|
2021-06-10T20:21:24.000Z
|
helpdesk/simple/views.py
|
fratoj/helpdesk
|
302c41491f26432bd65e468f015cdb123a47bcad
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
import numpy as np
| 21.565217 | 52 | 0.59879 |
397474e797b04315ff3ee3188dba1be27f9df132
| 752 |
py
|
Python
|
fullthrottleapp/models.py
|
Pranjali16/FullThrottle-Project
|
bb6fbd3783d22c2e47ad85687e18f02a30c69799
|
[
"Apache-2.0"
] | null | null | null |
fullthrottleapp/models.py
|
Pranjali16/FullThrottle-Project
|
bb6fbd3783d22c2e47ad85687e18f02a30c69799
|
[
"Apache-2.0"
] | null | null | null |
fullthrottleapp/models.py
|
Pranjali16/FullThrottle-Project
|
bb6fbd3783d22c2e47ad85687e18f02a30c69799
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
| 34.181818 | 91 | 0.679521 |
3974ecf545e9249007cc970e291df529ea220e8f
| 83 |
py
|
Python
|
devind_helpers/validator/__init__.py
|
devind-team/devind-django-helpers
|
5c64d46a12802bbe0b70e44aa9d19bf975511b6e
|
[
"MIT"
] | null | null | null |
devind_helpers/validator/__init__.py
|
devind-team/devind-django-helpers
|
5c64d46a12802bbe0b70e44aa9d19bf975511b6e
|
[
"MIT"
] | 4 |
2022-02-18T09:24:05.000Z
|
2022-03-31T16:46:29.000Z
|
devind_helpers/validator/__init__.py
|
devind-team/devind-django-helpers
|
5c64d46a12802bbe0b70e44aa9d19bf975511b6e
|
[
"MIT"
] | null | null | null |
from .validators import Validator, BaseRule
__all__ = ('Validator', 'BaseRule',)
| 16.6 | 43 | 0.73494 |
3975e522eae96a6443ccb6146ef3bb31b2d6df06
| 1,320 |
py
|
Python
|
examples/bruker_processed_1d/bruker_processed_1d.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 150 |
2015-01-16T12:24:13.000Z
|
2022-03-03T18:01:18.000Z
|
examples/bruker_processed_1d/bruker_processed_1d.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 129 |
2015-01-13T04:58:56.000Z
|
2022-03-02T13:39:16.000Z
|
examples/bruker_processed_1d/bruker_processed_1d.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 88 |
2015-02-16T20:04:12.000Z
|
2022-03-10T06:50:30.000Z
|
#! /usr/bin/env python
"""
Compare bruker read_pdata to read.
"""
import nmrglue as ng
import matplotlib.pyplot as plt
# read in the data
data_dir = "data/bruker_exp/1/pdata/1"
# From pre-processed data.
dic, data = ng.bruker.read_pdata(data_dir, scale_data=True)
udic = ng.bruker.guess_udic(dic, data)
uc = ng.fileiobase.uc_from_udic(udic)
ppm_scale = uc.ppm_scale()
# From FID
dic1, data1 = ng.bruker.read(data_dir)
# remove the digital filter, this data is from an analog spectrometer.
# data = ng.bruker.remove_digital_filter(dic, data)
# process the spectrum
data1 = ng.proc_base.ls(data1, 1) # left shift
data1 = ng.proc_base.gm(data1, g2=1/2.8e3) # To match proc data...
data1 = ng.proc_base.zf_size(data1, 1024*32) # zero fill
data1 = ng.proc_base.fft_positive(data1) # FT
data1 = ng.proc_base.ps(data1, p0=93) # phase is 180 off Bruker
data1 = ng.proc_base.di(data1) # discard
udic1 = ng.bruker.guess_udic(dic1, data1)
uc1 = ng.fileiobase.uc_from_udic(udic1)
ppm_scale1 = uc1.ppm_scale()
# plot the spectrum
fig = plt.figure()
plt.plot(ppm_scale, data)
plt.plot(ppm_scale1, data1)
plt.xlim([50, -50])
plt.xlabel('Carbon Chemical shift (ppm from neat TMS)')
plt.title('bruker.read_pdata vs bruker.read, note ppm axis')
plt.show()
| 28.085106 | 71 | 0.712121 |
397645cb5f3148b59ab74fb77253d9299c79d101
| 4,404 |
py
|
Python
|
tests/unit/test_posts_get_logic.py
|
claranet-ch/aws-sam-application-template-python
|
b835ef9295e4820110fd53f50619e4fea7493155
|
[
"CC-BY-4.0"
] | null | null | null |
tests/unit/test_posts_get_logic.py
|
claranet-ch/aws-sam-application-template-python
|
b835ef9295e4820110fd53f50619e4fea7493155
|
[
"CC-BY-4.0"
] | null | null | null |
tests/unit/test_posts_get_logic.py
|
claranet-ch/aws-sam-application-template-python
|
b835ef9295e4820110fd53f50619e4fea7493155
|
[
"CC-BY-4.0"
] | null | null | null |
import io
import os
import unittest
import boto3
from botocore.response import StreamingBody
from botocore.stub import Stubber
from functions.posts_get.posts_get_logic import posts_get_logic
| 35.516129 | 84 | 0.449818 |
3978056ea17d8290a8897ffe9ef1bc60af963d5f
| 21,050 |
py
|
Python
|
firepy/model/geometry.py
|
KBeno/firefly-lca
|
a081b05f5d66951792bd00d2bb6ae1f8e43235e0
|
[
"MIT"
] | 3 |
2020-06-16T13:39:31.000Z
|
2022-01-10T09:34:52.000Z
|
firepy/model/geometry.py
|
KBeno/boblica
|
a081b05f5d66951792bd00d2bb6ae1f8e43235e0
|
[
"MIT"
] | null | null | null |
firepy/model/geometry.py
|
KBeno/boblica
|
a081b05f5d66951792bd00d2bb6ae1f8e43235e0
|
[
"MIT"
] | null | null | null |
from typing import Union, List
import copy
import math
import numpy as np
"""
Principles:
- geometry objects are defined by the minimum required information
- Points are made of coordinates (floats), everything else is based on Points except for Vectors
"""
def move(obj: Union[Point, Line, Rectangle, Box, Face], vector: Vector, inplace=False):
if isinstance(obj, Point):
return obj + vector
else:
if inplace:
new_obj = obj
else:
new_obj = copy.deepcopy(obj)
for param, val in new_obj.__dict__.items():
if isinstance(val, (Point, Line, Rectangle, Box, Face)):
# love recursion
new_obj.__dict__[param] = move(val, vector)
elif isinstance(val, list):
new_obj.__dict__[param] = [move(p, vector) for p in val]
return new_obj
def rotate_xy(obj: Union[Point, Line, Rectangle, Box, Face], angle: float,
center: Point = Point(0, 0, 0), inplace=False):
"""
Rotate objects in the xy plane (around z axis)
:param obj: object to rotate
:param angle: angle to rotate with
:param center: center to rotate around
:param inplace: set True to modify the object instance itself
:return: rotated object
"""
if isinstance(obj, Point):
# move point to origin
obj_origin = move(obj, Point(0, 0, 0) - center)
# apply rotation around origin
new_point = Point(
x=obj_origin.x * math.cos(math.radians(angle)) - obj_origin.y * math.sin(math.radians(angle)),
y=obj_origin.x * math.sin(math.radians(angle)) + obj_origin.y * math.cos(math.radians(angle)),
z=obj_origin.z
)
# move back
return move(new_point, center - Point(0, 0, 0))
else:
if inplace:
new_obj = obj
else:
new_obj = copy.deepcopy(obj)
for param, val in new_obj.__dict__.items():
if isinstance(val, (Point, Line, Rectangle, Box, Face)):
# love recursion
new_obj.__dict__[param] = rotate_xy(val, angle, center)
elif isinstance(val, list):
new_obj.__dict__[param] = [rotate_xy(p, angle, center) for p in val]
return new_obj
| 34.850993 | 113 | 0.526366 |
3978db58ab61262a3273d3565d293223c2d9c041
| 556 |
py
|
Python
|
danmu/log.py
|
awesome-archive/danmu
|
2f4e943d859cecd31b289e21984e35a34515b71f
|
[
"WTFPL"
] | null | null | null |
danmu/log.py
|
awesome-archive/danmu
|
2f4e943d859cecd31b289e21984e35a34515b71f
|
[
"WTFPL"
] | null | null | null |
danmu/log.py
|
awesome-archive/danmu
|
2f4e943d859cecd31b289e21984e35a34515b71f
|
[
"WTFPL"
] | null | null | null |
import os, logging
if not os.path.exists('config'): os.mkdir('config')
log = logging.getLogger('danmu')
log.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join('config', 'run.log'), encoding = 'utf8')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)-17s <%(message)s> %(levelname)s %(filename)s[%(lineno)d]',
datefmt='%Y%m%d %H:%M:%S')
fileHandler.setFormatter(formatter)
log.addHandler(fileHandler)
if __name__ == '__main__':
log.debug('This is debug')
log.info('This is info')
| 34.75 | 101 | 0.690647 |
3978e2b002dc50ec5e34788e51f2d661aefcb01f
| 2,016 |
py
|
Python
|
vector_env_comparison.py
|
neuroevolution-ai/NaturalNets-PerformanceTests
|
de7d99424cc9ab29fdc3691c12d20d0a35afe0fe
|
[
"MIT"
] | null | null | null |
vector_env_comparison.py
|
neuroevolution-ai/NaturalNets-PerformanceTests
|
de7d99424cc9ab29fdc3691c12d20d0a35afe0fe
|
[
"MIT"
] | 1 |
2021-02-13T18:55:40.000Z
|
2021-02-13T18:55:40.000Z
|
vector_env_comparison.py
|
neuroevolution-ai/NaturalNets-PerformanceTests
|
de7d99424cc9ab29fdc3691c12d20d0a35afe0fe
|
[
"MIT"
] | null | null | null |
import multiprocessing
import time
import gym
import gym3
import numpy as np
from gym.vector import make as make_vec_env
from procgen import ProcgenGym3Env
population_size = 112
number_env_steps = 1000
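# The benchmark body is not included in this copy. The sketch below is an
# assumed reconstruction: it times `number_env_steps` no-op steps through a
# gym3 ProcgenGym3Env with `population_size` parallel envs; the original
# presumably also timed the gym.vector equivalent for comparison.
def main():
    env = ProcgenGym3Env(num=population_size, env_name="coinrun")
    actions = np.zeros(population_size, dtype=np.int32)  # no-op action per env
    start = time.time()
    for _ in range(number_env_steps):
        env.act(actions)
        reward, obs, first = env.observe()
    elapsed = time.time() - start
    print("gym3 procgen: %d steps x %d envs in %.2f s"
          % (number_env_steps, population_size, elapsed))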
if __name__ == "__main__":
main()
| 22.651685 | 98 | 0.671627 |
397b7ca45c3f9235af0d2fa52c9c29634429cebe
| 1,641 |
py
|
Python
|
raiden_api/model/requests.py
|
kelsos/test-enviroment-scripts
|
ab8d9f1e9a1deed048dcc93ec9d014bf6b58252d
|
[
"MIT"
] | 1 |
2019-03-28T00:24:48.000Z
|
2019-03-28T00:24:48.000Z
|
raiden_api/model/requests.py
|
kelsos/test-enviroment-scripts
|
ab8d9f1e9a1deed048dcc93ec9d014bf6b58252d
|
[
"MIT"
] | 4 |
2019-03-26T15:27:20.000Z
|
2019-04-29T10:46:08.000Z
|
raiden_api/model/requests.py
|
kelsos/test-enviroment-scripts
|
ab8d9f1e9a1deed048dcc93ec9d014bf6b58252d
|
[
"MIT"
] | 2 |
2019-03-26T14:27:24.000Z
|
2019-03-29T10:28:40.000Z
|
import time
import typing
| 26.467742 | 69 | 0.597806 |
397c69961dfa90f232f4ac9c29a73bc3e9510c76
| 823 |
py
|
Python
|
Dynamic/KnapNoRep.py
|
mladuke/Algorithms
|
eab5d89c5f496b2849f0646dbfa3a4db93a0b391
|
[
"MIT"
] | null | null | null |
Dynamic/KnapNoRep.py
|
mladuke/Algorithms
|
eab5d89c5f496b2849f0646dbfa3a4db93a0b391
|
[
"MIT"
] | null | null | null |
Dynamic/KnapNoRep.py
|
mladuke/Algorithms
|
eab5d89c5f496b2849f0646dbfa3a4db93a0b391
|
[
"MIT"
] | null | null | null |
# adapted from https://sites.google.com/site/mikescoderama/Home/0-1-knapsack-problem-in-p
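# The function body was dropped from this copy; the version below is a
# standard 0/1 knapsack DP re-created to match the call at the bottom of the
# file and the linked source: it returns [best value, chosen item indices].
def zeroOneKnapsack(v, w, W):
    n = len(v)
    # table[i][j] = best value using the first i items with capacity j
    table = [[0 for _ in range(W + 1)] for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(W + 1):
            table[i][j] = table[i - 1][j]
            if w[i - 1] <= j:
                table[i][j] = max(table[i][j],
                                  table[i - 1][j - w[i - 1]] + v[i - 1])
    # walk the table backwards to recover the chosen items
    chosen = []
    j = W
    for i in range(n, 0, -1):
        if table[i][j] != table[i - 1][j]:
            chosen.append(i - 1)
            j -= w[i - 1]
    chosen.reverse()
    return [table[n][W], chosen]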
W = 10
v = [9, 14, 16, 30]
w = [2, 3, 4, 6]
print(zeroOneKnapsack(v, w, W))
| 24.939394 | 90 | 0.509113 |
397c6d5c141c7b6d17cf9a8f120d47ea7101ea9f
| 587 |
py
|
Python
|
tasks/migrations/0002_auto_20201008_2236.py
|
milenakowalska/todolist
|
5b5208b952e88334453935652424f8168ecf9113
|
[
"MIT"
] | null | null | null |
tasks/migrations/0002_auto_20201008_2236.py
|
milenakowalska/todolist
|
5b5208b952e88334453935652424f8168ecf9113
|
[
"MIT"
] | null | null | null |
tasks/migrations/0002_auto_20201008_2236.py
|
milenakowalska/todolist
|
5b5208b952e88334453935652424f8168ecf9113
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-10-08 22:36
from django.db import migrations, models
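# The Migration class itself is missing from this copy; the skeleton below
# restores the structure Django expects. The dependency and (empty)
# operations are assumptions inferred from the file name, not the original
# auto-generated contents.
class Migration(migrations.Migration):
    dependencies = [
        ('tasks', '0001_initial'),
    ]
    operations = [
        # the original auto-generated operations are not available
    ]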
| 24.458333 | 112 | 0.575809 |
397e9f0c2652f385de08911a9951e3eb07c5c86a
| 874 |
py
|
Python
|
tools/one-offs/convert-genres.py
|
DrDos0016/z2
|
b63e77129fefcb4f990ee1cb9952f4f708ee3a2b
|
[
"MIT"
] | 3 |
2017-05-01T19:53:57.000Z
|
2018-08-27T20:14:43.000Z
|
tools/one-offs/convert-genres.py
|
DrDos0016/z2
|
b63e77129fefcb4f990ee1cb9952f4f708ee3a2b
|
[
"MIT"
] | null | null | null |
tools/one-offs/convert-genres.py
|
DrDos0016/z2
|
b63e77129fefcb4f990ee1cb9952f4f708ee3a2b
|
[
"MIT"
] | 1 |
2018-08-27T20:14:46.000Z
|
2018-08-27T20:14:46.000Z
|
import os
import sys
import django
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from django.contrib.auth.models import User # noqa: E402
from museum_site.models import * # noqa: E402
if __name__ == '__main__':
main()
| 23.621622 | 98 | 0.639588 |
397ee9d80cbe93ca71977088ed64acae351304fd
| 553 |
py
|
Python
|
python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 04/ch04_rec03_plot_with_table.py
|
flyingwjw/Documentation
|
567608f388ca369b864c2d75a94647801b5dfa1e
|
[
"Unlicense"
] | 26 |
2016-08-25T01:33:36.000Z
|
2022-03-20T11:33:31.000Z
|
python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 04/ch04_rec03_plot_with_table.py
|
flyingwjw/Documentation
|
567608f388ca369b864c2d75a94647801b5dfa1e
|
[
"Unlicense"
] | null | null | null |
python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 04/ch04_rec03_plot_with_table.py
|
flyingwjw/Documentation
|
567608f388ca369b864c2d75a94647801b5dfa1e
|
[
"Unlicense"
] | 31 |
2016-08-16T15:32:46.000Z
|
2021-01-26T19:16:48.000Z
|
import matplotlib.pylab as plt
import numpy as np
plt.figure()
axes=plt.gca()
y= np.random.randn(9)
col_labels=['col1','col2','col3']
row_labels=['row1','row2','row3']
table_vals=[[11,12,13],[21,22,23],[28,29,30]]
row_colors=['red','gold','green']
the_table = plt.table(cellText=table_vals,
colWidths = [0.1]*3,
rowLabels=row_labels,
colLabels=col_labels,
rowColours=row_colors,
loc='upper right')
plt.text(12,3.4,'Table Title',size=8)
plt.plot(y)
plt.show()
| 24.043478 | 45 | 0.593128 |
3980310409feb9f0ac71dbf46448b126022d5366
| 1,258 |
py
|
Python
|
support.py
|
ipascual1/spootnik_bot
|
ad7658f49705b1ce57bcc5ed84006ef658f63fa3
|
[
"Unlicense"
] | null | null | null |
support.py
|
ipascual1/spootnik_bot
|
ad7658f49705b1ce57bcc5ed84006ef658f63fa3
|
[
"Unlicense"
] | null | null | null |
support.py
|
ipascual1/spootnik_bot
|
ad7658f49705b1ce57bcc5ed84006ef658f63fa3
|
[
"Unlicense"
] | null | null | null |
import re
import os
def extract(regularE : str, init : str, stop : str, string : str):
"""
regularE: RE to catch string
init: First string to replace
stop: Last string to replace
string: String to apply the RE
With a regular expression and init and stop to replace, gets a
substring from string argument and returns it.
"""
return re.findall(regularE, string)[0]\
.replace(init, "")\
.replace(stop, "")
def get_term_clock_pid():
"""
return: int with the PID of term_clock;
-1 if process doesn't exist.
Extracts the PID of term_clock process with systemctl.
"""
    # sputnikDriver prints all the PIDs of its subprocesses in its own console
ret = os.popen("systemctl status sputnikDriver.service").read()
if ret == "":
return -1
return int(extract(r"term_clock .+ PID", "term_clock ", " PID", ret))
def check_alive():
"""
return: True if java process is running;
False otherwise
Check if a java process in sputnikDriver (i.e. the Minecraft Server) is running
"""
ret = os.popen("systemctl status sputnikDriver.service").read()
return "java" in ret
| 29.255814 | 83 | 0.612878 |
39806196aae9564f8e399df05393bb7226dec4f7
| 1,054 |
py
|
Python
|
steam.py
|
iganeshk/alfred-totp
|
f9c17fe83025c99cbfaf5413d20212aa63d7e0d5
|
[
"MIT"
] | 7 |
2020-04-12T21:16:41.000Z
|
2022-01-09T08:55:22.000Z
|
steam.py
|
iganeshk/alfred-totp
|
f9c17fe83025c99cbfaf5413d20212aa63d7e0d5
|
[
"MIT"
] | null | null | null |
steam.py
|
iganeshk/alfred-totp
|
f9c17fe83025c99cbfaf5413d20212aa63d7e0d5
|
[
"MIT"
] | 1 |
2022-03-26T16:04:53.000Z
|
2022-03-26T16:04:53.000Z
|
#!/usr/env/python3
# coding=utf-8
#
# Generate Steamguard OTP with the shared secret passed as an argument
# Ganesh Velu
import hmac
import base64
import hashlib
import codecs
import time
import sys
STEAM_DECODE_CHARS = ['2', '3', '4', '5', '6', '7', '8', '9',
'B', 'C', 'D', 'F', 'G', 'H', 'J', 'K',
'M', 'N', 'P', 'Q', 'R', 'T', 'V', 'W',
'X', 'Y']
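# The code derivation below was missing from this copy; it is the standard,
# widely documented Steam Guard scheme (HMAC-SHA1 over a 30-second counter,
# dynamically truncated, then mapped onto Steam's 26-character alphabet),
# reconstructed here rather than taken from the original file.
def get_authentication_code(shared_secret):
    key = base64.b64decode(shared_secret)
    msg = (int(time.time()) // 30).to_bytes(8, 'big')  # 30-second time window
    digest = hmac.new(key, msg, hashlib.sha1).digest()
    start = digest[19] & 0x0F  # dynamic truncation offset
    code_int = int.from_bytes(digest[start:start + 4], 'big') & 0x7FFFFFFF
    code = ''
    for _ in range(5):  # Steam Guard codes are 5 characters long
        code += STEAM_DECODE_CHARS[code_int % len(STEAM_DECODE_CHARS)]
        code_int //= len(STEAM_DECODE_CHARS)
    return code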
if __name__ == '__main__':
print(get_authentication_code(sys.argv[1]), end='')
| 29.277778 | 90 | 0.586338 |
39812282916a91f854eceaec095dab9dd29955a6
| 1,783 |
py
|
Python
|
igvc_ws/src/igvc_nav/src/path_planner/node.py
|
SoonerRobotics/igvc_software_2022
|
906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd
|
[
"MIT"
] | 4 |
2020-07-07T14:56:56.000Z
|
2021-08-13T23:31:07.000Z
|
igvc_ws/src/igvc_nav/src/path_planner/node.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 13 |
2019-11-12T02:57:54.000Z
|
2020-03-17T17:04:22.000Z
|
igvc_ws/src/igvc_nav/src/path_planner/node.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 3 |
2021-06-29T05:21:18.000Z
|
2021-08-23T05:03:27.000Z
|
"""
"""
| 22.858974 | 68 | 0.528884 |
3982bd3c6134c4bd9c5526d9392f74c9c724e7ab
| 556 |
py
|
Python
|
makahiki/apps/widgets/energy_power_meter/views.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | 1 |
2015-07-22T11:31:20.000Z
|
2015-07-22T11:31:20.000Z
|
makahiki/apps/widgets/energy_power_meter/views.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | null | null | null |
makahiki/apps/widgets/energy_power_meter/views.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | null | null | null |
"""Handle rendering of the Energy Power Meter widget."""
from apps.widgets.resource_goal import resource_goal
def supply(request, page_name):
"""Return the view_objects content, which in this case is empty."""
_ = page_name
team = request.user.get_profile().team
if team:
interval = resource_goal.team_goal_settings(team, "energy").realtime_meter_interval
else:
interval = None
width = 300
height = 100
return {"interval": interval,
"width": width,
"height": height
}
| 26.47619 | 91 | 0.645683 |
3982edd57b175c1d224315f35831e37d04e0c726
| 1,408 |
py
|
Python
|
tools/generatekeypair.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027 |
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
tools/generatekeypair.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496 |
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
tools/generatekeypair.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249 |
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import argparse
import json
from authlib.jose import JsonWebKey
from cryptography.hazmat.primitives import serialization
def generate_key_pair(filename, kid=None):
"""
'kid' will default to the jwk thumbprint if not set explicitly.
Reference: https://tools.ietf.org/html/rfc7638
"""
options = {}
if kid:
options["kid"] = kid
jwk = JsonWebKey.generate_key("RSA", 2048, is_private=True, options=options)
print(("Writing public key to %s.jwk" % filename))
with open("%s.jwk" % filename, mode="w") as f:
f.truncate(0)
f.write(jwk.as_json())
print(("Writing key ID to %s.kid" % filename))
with open("%s.kid" % filename, mode="w") as f:
f.truncate(0)
f.write(jwk.as_dict()["kid"])
print(("Writing private key to %s.pem" % filename))
with open("%s.pem" % filename, mode="wb") as f:
f.truncate(0)
f.write(
jwk.get_private_key().private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
)
parser = argparse.ArgumentParser(description="Generates a key pair into files")
parser.add_argument("filename", help="The filename prefix for the generated key files")
args = parser.parse_args()
generate_key_pair(args.filename)
| 30.608696 | 87 | 0.648438 |
3983bdef6c20e9a6ac20cbeb01a996a5e1766f34
| 4,855 |
py
|
Python
|
hkpy/hkpyo/reasoners/simple_reasoner.py
|
renan-souza/hkpy
|
1fdcd3da3520e876f95295bf6d15e40581b2bb49
|
[
"MIT"
] | 7 |
2019-12-23T17:59:36.000Z
|
2022-02-17T19:35:32.000Z
|
hkpy/hkpyo/reasoners/simple_reasoner.py
|
renan-souza/hkpy
|
1fdcd3da3520e876f95295bf6d15e40581b2bb49
|
[
"MIT"
] | 9 |
2019-12-30T13:34:41.000Z
|
2021-07-16T22:46:06.000Z
|
hkpy/hkpyo/reasoners/simple_reasoner.py
|
renan-souza/hkpy
|
1fdcd3da3520e876f95295bf6d15e40581b2bb49
|
[
"MIT"
] | 2 |
2020-03-14T21:34:02.000Z
|
2021-06-12T00:10:43.000Z
|
###
# Copyright (c) 2019-present, IBM Research
# Licensed under The MIT License [see LICENSE for details]
###
from collections import defaultdict
from hkpy.hkpyo.model import HKOContext, HKOContextManager, HKOConcept, HKOSubConceptAxiom, HKOConjunctionExpression, \
HKODisjunctionExpression, HKOConceptAssertion, HKOIndividual, HKOPropertyAssertion, HKOLiteral, Union, HKOAxiom, \
HKOAssertion, HKOProperty
| 45.801887 | 119 | 0.65829 |
39846d963efc3c25f62f763940ae6d00481112ea
| 237 |
py
|
Python
|
coffeebar/admin.py
|
viktor-yakubiv/django-coffee
|
0a7d62a53db6af48fdc852fbb4dae43a0fc2b2ef
|
[
"MIT"
] | null | null | null |
coffeebar/admin.py
|
viktor-yakubiv/django-coffee
|
0a7d62a53db6af48fdc852fbb4dae43a0fc2b2ef
|
[
"MIT"
] | null | null | null |
coffeebar/admin.py
|
viktor-yakubiv/django-coffee
|
0a7d62a53db6af48fdc852fbb4dae43a0fc2b2ef
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Account, Product, Drink, Topping, Order
admin.site.register(Account)
admin.site.register(Product)
admin.site.register(Drink)
admin.site.register(Topping)
admin.site.register(Order)
| 21.545455 | 59 | 0.805907 |
398508cf7b96c7a53317b86338d3ac80d4ac69c4
| 106 |
py
|
Python
|
influxdb_client/client/__init__.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
influxdb_client/client/__init__.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
influxdb_client/client/__init__.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from influxdb_client.client.influxdb_client import InfluxDBClient
| 26.5 | 65 | 0.896226 |
398533491570a42901637e1afb785d157af6a86a
| 809 |
py
|
Python
|
accounts/forms.py
|
mohsenamoon1160417237/Social_app
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
mohsenamoon1160417237/Social_app
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
mohsenamoon1160417237/Social_app
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django import forms
from .models import UserProfile
| 17.212766 | 98 | 0.721879 |
3985a0d08f66c16279006e5cf92a0a215003522a
| 8,031 |
py
|
Python
|
prediction-experiments/python-nb/ov-predict/src/api/model_loader.py
|
ouyangzhiping/Info-extract
|
d8a7ca47201dad4d28b9b96861b0b1b3fc27c63a
|
[
"Apache-2.0"
] | 15 |
2019-02-25T09:53:37.000Z
|
2022-03-22T05:13:24.000Z
|
prediction-experiments/python-nb/ov-predict/src/api/model_loader.py
|
ouyangzhiping/Info-extract
|
d8a7ca47201dad4d28b9b96861b0b1b3fc27c63a
|
[
"Apache-2.0"
] | 8 |
2019-06-12T10:14:58.000Z
|
2021-08-15T08:04:10.000Z
|
prediction-experiments/python-nb/ov-predict/src/api/model_loader.py
|
ouyangzhiping/Info-extract
|
d8a7ca47201dad4d28b9b96861b0b1b3fc27c63a
|
[
"Apache-2.0"
] | 1 |
2022-03-15T16:45:35.000Z
|
2022-03-15T16:45:35.000Z
|
import sys
import numpy as np
import os
import requests
import json
import logging
from json import JSONEncoder
from keras.models import model_from_json
sys.path.append('..')
from preprocessing.InputHelper import InputHelper
from model.lstm import rmse
from model.lstm import buildModel
from keras.preprocessing.sequence import pad_sequences
sys.path.append('..')
'''
This is a stand-alone test for the python API service. It doesn't use Flask.
'''
OPTIMIZER = 'rmsprop'
NUM_CLASSES = 0
MAXLEN = 50
SAVED_MODEL_FILE = '../../saved_models/model.h5'
PUBMED_DIM = 200
VAL_DIMENSIONS = 5
TF_SERVING_HOSTNAME = os.environ.get("TF_SERVING_HOSTNAME", "")
TF_SERVING_PORT = os.environ.get("TF_SERVING_PORT", "")
USES_TF_SERVING = TF_SERVING_HOSTNAME != "" and TF_SERVING_PORT != ""
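# JSONEncoder subclass used by call_tf_serving_predict below. The original
# definition was dropped from this copy; this is the standard pattern for
# serialising numpy arrays into a JSON POST body.
class NumpyArrayEncoder(JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return JSONEncoder.default(self, obj)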
def get_model_json(saved_model):
print("Loading model from file {}".format(saved_model))
json_file = open(saved_model, 'r')
json_str = json_file.read()
json_file.close()
return json_str
def predict_outcome(inpH, model, test_instance_str):
x = inpH.tokenizer.texts_to_sequences([test_instance_str])
x = pad_sequences(x, padding='post', maxlen=MAXLEN)
y_preds = model.predict(x, steps=1)
return y_preds[0]
def predict_regression_outcome(model, model_name, test_input_batch):
y_preds = predict_outcome_local_or_api(model, model_name, test_input_batch)
return y_preds[:,0]
def predict_confidence(model, model_name, test_input_batch):
y_preds = predict_outcome_local_or_api(model, model_name, test_input_batch)
return np.max(y_preds, axis=1)
def predict_outcome_local_or_api(model, model_name, test_input_batch):
if USES_TF_SERVING:
return call_tf_serving_predict(model_name, test_input_batch)
else:
# in this case, "model" is the actual keras model
return predict_outcome_with_dynamic_vocabchange(model, test_input_batch)
def predict_outcome_with_dynamic_vocabchange(model, test_input_batch):
x_test = test_input_batch
print("x_test = {}".format(x_test))
y_preds = model.predict_on_batch(x_test)
print('y_preds = {}'.format(y_preds))
return y_preds
def call_tf_serving_predict(model_name, test_input_batch):
x_test = test_input_batch
logging.debug("x_test = {}".format(x_test))
url = get_tf_serving_predict_endpoint(model_name)
# batched instances
instances = x_test
json_post_body = json.dumps({"instances": instances}, cls=NumpyArrayEncoder)
r = requests.post(url, json_post_body)
logging.info(f"Response from {url}")
logging.info(r.text)
response = r.json()
return np.array(response["predictions"])
def get_tf_serving_predict_endpoint(model_name):
return "http://" + TF_SERVING_HOSTNAME + ":" + TF_SERVING_PORT + "/" \
+ "v1/models/" + model_name + ":predict"
def init_embedding(embfile):
inpH = InputHelper()
print("converting words to ids...")
inpH.convertWordsToIds(embfile)
print("vocab size = {}".format(inpH.vocab_size))
inpH.loadW2V(embfile)
return inpH
# Replace a node if the form C:<x>:0.1 with C:<x>:0.2 (the closest value with the same attrib-id in our vocabulary)
if __name__ == "__main__":
main(sys.argv[1:])
| 32.383065 | 115 | 0.707757 |
3986c0e0bd792870f8eee7d99d0e2fa5761fa22e
| 1,429 |
py
|
Python
|
blueprints/accounts/manage/config.py
|
GetmeUK/h51
|
17d4003336857514765a42a0853995fbe3da6525
|
[
"MIT"
] | null | null | null |
blueprints/accounts/manage/config.py
|
GetmeUK/h51
|
17d4003336857514765a42a0853995fbe3da6525
|
[
"MIT"
] | 4 |
2021-06-08T22:58:13.000Z
|
2022-03-12T00:53:18.000Z
|
blueprints/accounts/manage/config.py
|
GetmeUK/h51
|
17d4003336857514765a42a0853995fbe3da6525
|
[
"MIT"
] | null | null | null |
from manhattan.manage import config
from manhattan.nav import Nav, NavItem
from blueprints.accounts.manage import blueprint
from blueprints.accounts.models import Account
__all__ = ['AccountConfig']
| 26.462963 | 70 | 0.491952 |
3986fe60405cf4775e3e7c28b77f8afe1fba2cf3
| 599 |
py
|
Python
|
tests/test_fails.py
|
Alviner/wsrpc-aiohttp
|
12387f68b74587e52ae4b10f28892dbbb2afc32f
|
[
"MIT"
] | null | null | null |
tests/test_fails.py
|
Alviner/wsrpc-aiohttp
|
12387f68b74587e52ae4b10f28892dbbb2afc32f
|
[
"MIT"
] | null | null | null |
tests/test_fails.py
|
Alviner/wsrpc-aiohttp
|
12387f68b74587e52ae4b10f28892dbbb2afc32f
|
[
"MIT"
] | null | null | null |
from aiohttp import ClientConnectionError
from wsrpc_aiohttp.testing import BaseTestCase, async_timeout
| 28.52381 | 73 | 0.689482 |
398a3a700f8b78eced80ede2546a27f9c162d1aa
| 2,325 |
py
|
Python
|
devops/python/issuebot/applog.py
|
simahao/lily
|
c22ec37cb02374e94b41822eccc5e6d6aa7d0d25
|
[
"MIT"
] | 4 |
2020-11-16T06:24:19.000Z
|
2021-05-19T02:10:01.000Z
|
devops/python/issuebot/applog.py
|
simahao/lily
|
c22ec37cb02374e94b41822eccc5e6d6aa7d0d25
|
[
"MIT"
] | 5 |
2021-05-05T14:17:27.000Z
|
2021-09-30T08:47:23.000Z
|
devops/python/issuebot/applog.py
|
simahao/lily
|
c22ec37cb02374e94b41822eccc5e6d6aa7d0d25
|
[
"MIT"
] | 3 |
2021-02-22T01:38:49.000Z
|
2021-06-03T08:52:37.000Z
|
import logging
import logging.config
import os
LOG_DIR = os.path.dirname(os.path.abspath(__file__))
log_config = {
'version': 1,
'formatters': {
'verbose': {
'class': 'logging.Formatter',
'format': '%(asctime)s [%(name)s] %(levelname)-8s %(pathname)s:%(lineno)d - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'style': '%'
},
'simple': {
'class': 'logging.Formatter',
'format': '%(asctime)s %(levelname)-8s - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'style': '%'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple'
},
'octopus': {
'class': 'logging.FileHandler',
'level': 'INFO',
'filename': os.path.join(LOG_DIR, 'octopus.log'),
'mode': 'a',
'formatter': 'verbose',
'encoding': 'utf-8'
},
'surveillance': {
'class': 'logging.FileHandler',
'level': 'INFO',
'filename': os.path.join(LOG_DIR, 'surveillance.log'),
'mode': 'a',
'formatter': 'verbose',
'encoding': 'utf-8'
},
'file': {
'class': 'logging.FileHandler',
'level': 'INFO',
'filename': 'app.log',
'mode': 'a',
'formatter': 'verbose',
'encoding': 'utf-8'
},
'rotate_file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'filename': 'app.log',
'mode': 'a',
'formatter': 'verbose',
'maxBytes': 10485760,
'backupCount': 3,
'encoding': 'utf-8'
}
},
'loggers': {
'Octopus': {
'handlers': ['octopus']
},
'Surveillance': {
'handlers': ['surveillance']
}
},
'root': {
'level': 'INFO',
'handlers': ['console']
}
}
# propagate defaults to True, so a message is propagated to its parent loggers up to root
# e.g. Octopus flushes messages to its file handler and propagates them to the root logger, which flushes to the console
logging.config.dictConfig(log_config)
| 29.43038 | 101 | 0.455054 |
398adc2cec18c8f88eebd57e5b5cd30a4eaccd31
| 5,280 |
py
|
Python
|
basket/BasketGlobals.py
|
Hartman-/Basket
|
7b9c174b031c9ffac2de886f5e149adcd5f7c83f
|
[
"BSD-3-Clause"
] | 2 |
2017-02-07T11:28:58.000Z
|
2017-12-01T05:41:36.000Z
|
basket/BasketGlobals.py
|
Hartman-/Basket
|
7b9c174b031c9ffac2de886f5e149adcd5f7c83f
|
[
"BSD-3-Clause"
] | 25 |
2016-08-18T01:16:59.000Z
|
2017-02-11T03:57:20.000Z
|
basket/BasketGlobals.py
|
Hartman-/Basket
|
7b9c174b031c9ffac2de886f5e149adcd5f7c83f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import platform
from glob import glob
import utils.appconfig as appconfig
# GLOBAL CONSTANTS
# --- File Structure Constants ---
BASE_DIRS = {
'delivery': [
'CritiqueArchive'
],
'docs': [],
'frames': [],
'library': [
'models',
'templates',
'sound',
'texture'
],
'publish': [],
'source': [
'plates',
'reference'
],
'working': [
'scenes',
'assets'
]}
PROD_DIRS = [
'scenes',
'publish'
]
STAGE_DIRS = appconfig.get_config_value('law', 'stages')
FRAME_DIRS = [
'cg',
'comp',
'edit',
'elements',
'plates'
]
# GLOBAL FUNCTIONS
# SET SHOW ENV VARIABLE
# SET SEQ ENV VARIABLE
# SET SHOT ENV VARIABLE
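# The directory helpers these headers introduced are missing from this copy.
# serverDir() below is a hedged reconstruction so the __main__ block still
# runs; the 'server'/'root' config keys are assumptions, not values taken
# from the original file.
def serverDir(*paths):
    root = appconfig.get_config_value('server', 'root')
    return os.path.join(root, *paths)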
if __name__ == '__main__':
    print(serverDir())
| 27.076923 | 115 | 0.595455 |
398d56540cd3fb4efa42ef33aee42fa70cf89afe
| 3,024 |
py
|
Python
|
datasets/thuc_news/thuc_news.py
|
jhxu-org/datasets
|
e78e81ff2aec2928506a42c3312799acd6c5e807
|
[
"Apache-2.0"
] | null | null | null |
datasets/thuc_news/thuc_news.py
|
jhxu-org/datasets
|
e78e81ff2aec2928506a42c3312799acd6c5e807
|
[
"Apache-2.0"
] | null | null | null |
datasets/thuc_news/thuc_news.py
|
jhxu-org/datasets
|
e78e81ff2aec2928506a42c3312799acd6c5e807
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""THUNews"""
import csv
import ctypes
import os
import datasets
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
_CITATION = """\
@misc{xujianhua,
title={page xxx},
author={Xiang Zhang and Junbo Zhao and Yann LeCun},
year={2015},
eprint={1509.01626},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_DESCRIPTION = """\
THUCTC(THU Chinese Text Classification)\
THUCTCbigramChi-squaretfidf
LibSVMLibLinearTHUCTC
"""
_DATA_URL = "http://127.0.0.1/thuc_news.zip"
# the 14 THUCNews categories; the original Chinese labels were lost in this
# copy, so their standard English renderings are used here
_CLS = ['sports', 'entertainment', 'home', 'lottery', 'real estate', 'education',
        'fashion', 'politics', 'horoscope', 'games', 'society', 'technology',
        'stocks', 'finance']
| 33.977528 | 114 | 0.638889 |
3990560a6bff336fd21ff88b51780152f5105716
| 1,215 |
py
|
Python
|
mundo3/ex115/lib/arquivo/__init__.py
|
dilsonm/CeV
|
8043be36b2da187065691d23ed5cb40fd65f806f
|
[
"MIT"
] | null | null | null |
mundo3/ex115/lib/arquivo/__init__.py
|
dilsonm/CeV
|
8043be36b2da187065691d23ed5cb40fd65f806f
|
[
"MIT"
] | null | null | null |
mundo3/ex115/lib/arquivo/__init__.py
|
dilsonm/CeV
|
8043be36b2da187065691d23ed5cb40fd65f806f
|
[
"MIT"
] | null | null | null |
from lib.interface import cabecalho
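# The helper functions were dropped from this copy. The sketch below follows
# the usual shape of this exercise's file-handling module; names mirror the
# course convention, but bodies and messages are assumptions.
def arquivoExiste(nome):
    try:
        a = open(nome, 'rt')
        a.close()
    except FileNotFoundError:
        return False
    return True
def criarArquivo(nome):
    try:
        a = open(nome, 'wt+')
        a.close()
    except Exception:
        print(f'ERROR: could not create the file {nome}!')
    else:
        print(f'File {nome} successfully created!')
def lerArquivo(nome):
    try:
        a = open(nome, 'rt')
    except Exception:
        print(f'ERROR: could not read the file {nome}!')
    else:
        cabecalho('PEOPLE REGISTERED')  # header helper imported above
        for linha in a:
            dado = linha.strip().split(';')
            print(f'{dado[0]:<30}{dado[1]:>3} years')
        a.close()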
| 23.365385 | 57 | 0.516872 |
399279cf633bc710b68c85b8b7d375ff1f8fa454
| 2,626 |
py
|
Python
|
path-sum-four-ways/solution.py
|
ALB37/project-euler-problems
|
c3fb4213e150805bfe45b15847bc6449eb907c7a
|
[
"MIT"
] | null | null | null |
path-sum-four-ways/solution.py
|
ALB37/project-euler-problems
|
c3fb4213e150805bfe45b15847bc6449eb907c7a
|
[
"MIT"
] | null | null | null |
path-sum-four-ways/solution.py
|
ALB37/project-euler-problems
|
c3fb4213e150805bfe45b15847bc6449eb907c7a
|
[
"MIT"
] | null | null | null |
from graph import Graph
matrix = []
with open('p083_matrix.txt') as file:
for line in file.readlines():
currentline = [int(n) for n in line.split(',')]
matrix.append(currentline)
numGraph = Graph()
# add each node first
for i in range(len(matrix)):
for j in range(len(matrix[i])):
numGraph.addNode((i, j))
# then map edges
for i in range(len(matrix)):
    for j in range(len(matrix[i])):
        # connect each cell to its in-bounds neighbours (down, up, right, left);
        # the edge weight is the value of the destination cell
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < len(matrix) and 0 <= nj < len(matrix[ni]):
                numGraph.addEdge((i, j), (ni, nj), matrix[ni][nj])
endCoordinates = (len(matrix) - 1, len(matrix[0]) - 1)
shortestPathMap = numGraph.aStarSearch((0, 0), endCoordinates)
shortestPath = numGraph.outputPath(shortestPathMap, (0, 0), endCoordinates)
print(sum([matrix[c[0]][c[1]] for c in shortestPath]))
| 38.617647 | 75 | 0.485149 |
39965ea3888f463b999a6106ce07def8d9adf4ac
| 4,010 |
py
|
Python
|
carts/views.py
|
yun-mh/uniwalk
|
f5307f6970b24736d13b56b4792c580398c35b3a
|
[
"Apache-2.0"
] | null | null | null |
carts/views.py
|
yun-mh/uniwalk
|
f5307f6970b24736d13b56b4792c580398c35b3a
|
[
"Apache-2.0"
] | 9 |
2020-01-10T14:10:02.000Z
|
2022-03-12T00:08:19.000Z
|
carts/views.py
|
yun-mh/uniwalk
|
f5307f6970b24736d13b56b4792c580398c35b3a
|
[
"Apache-2.0"
] | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect, get_object_or_404
from designs import models as design_models
from feet import models as foot_models
from products import models as product_models
from .models import Cart, CartItem
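# Session helper used throughout this module; the original definition was
# dropped from this copy. Returning (and lazily creating) Django's session
# key is the standard pattern for anonymous carts, so it is assumed here.
def _session_key(request):
    if not request.session.session_key:
        request.session.create()
    return request.session.session_key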
# add a product (with its chosen design and foot measurements) to the cart
def add_cart(request, pk, design_pk):
""" """
product = product_models.Product.objects.get(pk=pk)
    # fetch the cart tied to the current session
try:
cart = Cart.objects.get(session_key=_session_key(request))
    # no cart yet: create one (attached to the user when logged in)
except Cart.DoesNotExist:
if request.user.is_authenticated:
cart = Cart.objects.create(
session_key=_session_key(request), user_id=request.user.pk
)
cart.save()
else:
cart = Cart.objects.create(session_key=_session_key(request))
cart.save()
    # look for the same product/design already in the cart
try:
cart_item = CartItem.objects.get(product=product, cart=cart, design=design_pk)
        # measurements changed: update them instead of adding quantity
if (
cart_item.length_left != request.session["length_left"]
or cart_item.length_right != request.session["length_right"]
or cart_item.width_left != request.session["width_left"]
or cart_item.width_right != request.session["width_right"]
):
cart_item.length_left = request.session["length_left"]
cart_item.length_right = request.session["length_right"]
cart_item.width_left = request.session["width_left"]
cart_item.width_right = request.session["width_right"]
        # same measurements: just bump the quantity
else:
cart_item.quantity += 1
cart_item.save()
    # not in the cart yet: create the cart item
except CartItem.DoesNotExist:
cart_item = CartItem.objects.create(
product=product,
design=design_models.Design.objects.get(pk=design_pk),
length_left=request.session["length_left"],
length_right=request.session["length_right"],
width_left=request.session["width_left"],
width_right=request.session["width_right"],
quantity=1,
cart=cart,
)
cart_item.save()
return redirect("carts:cart")
def cart_display(request, amount=0, counter=0, cart_items=None):
""" """
    # collect the items and totals for the current session's cart
try:
cart = Cart.objects.get(session_key=_session_key(request))
cart_items = CartItem.objects.filter(cart=cart)
for cart_item in cart_items:
amount += cart_item.product.price * cart_item.quantity
counter += cart_item.quantity
    # no cart exists yet: fall through and render an empty cart
except ObjectDoesNotExist:
pass
return render(
request,
"carts/cart.html",
{"cart_items": cart_items, "amount": amount, "counter": counter},
)
def remove_item(request, pk, design_pk):
""" """
    # locate the cart item to decrement
cart = Cart.objects.get(session_key=_session_key(request))
product = get_object_or_404(product_models.Product, pk=pk)
cart_item = CartItem.objects.get(product=product, cart=cart, design=design_pk)
    # more than one of this item: decrement the quantity
if cart_item.quantity > 1:
cart_item.quantity -= 1
cart_item.save()
    # only one left: delete the item
else:
cart_item.delete()
return redirect("carts:cart")
def delete_cartitem(request, pk, design_pk):
""" """
    # locate the cart item and delete it
cart = Cart.objects.get(session_key=_session_key(request))
product = get_object_or_404(product_models.Product, pk=pk)
cart_item = CartItem.objects.get(product=product, cart=cart, design=design_pk)
cart_item.delete()
return redirect("carts:cart")
| 33.983051 | 86 | 0.672319 |
3996a072b5270c64e9a774f3c2758ba1336ec30d
| 13,515 |
py
|
Python
|
deploy.py
|
j-benson/Deploy
|
9fb2bd1c383949521967a672ac76fcdcaced503f
|
[
"MIT"
] | null | null | null |
deploy.py
|
j-benson/Deploy
|
9fb2bd1c383949521967a672ac76fcdcaced503f
|
[
"MIT"
] | null | null | null |
deploy.py
|
j-benson/Deploy
|
9fb2bd1c383949521967a672ac76fcdcaced503f
|
[
"MIT"
] | null | null | null |
"""
Script to deploy a website to the server by ftp.
- Compares local directory with remote directory
- Updates modified files
- Adds new files
- Optionally, removes deleted files from remote
Requires: python 3.3+
Due to use of ftplib.mlsd()
The MIT License (MIT)
Copyright (c) 2015 James Benson
"""
"""
TODO: FTP response codes to look out for:
- 502 unknown command
- 550 empty directory
- 451 can't remove directory
Good ones:
- 226 transfer complete
"""
asciiExt = ['coffee', 'css', 'erb', 'haml', 'handlebars', 'hb', 'htm', 'html',
'js', 'less', 'markdown', 'md', 'ms', 'mustache', 'php', 'rb', 'sass', 'scss',
'slim', 'txt', 'xhtml', 'xml'];
deleteIgnoreFiles = ["/.ftpquota"];
deleteIgnoreDirs = ["/cgi-bin"];
remoteSep = "/";
dLogName = "debug.txt";
STOR_AUTO = 0;
STOR_BINARY = 1;
STOR_ASCII = 2;
UPLOAD_OVERWRITE = 0;
UPLOAD_MODIFIED = 1;
######################### SETUP ##########################
remoteHost = "127.0.0.1";
remoteUser = "Benson";
remotePassword = "benson";
localPath = "D:\\test\\ftp";
remotePath = "/";
### OPTIONS ###
verbose = True;
remoteTLS = False; # SSL/TLS doesn't work: invalid certificate error
remoteDelete = True;
remoteIgnoreHidden = False; # TODO: Implement hidden.
storMode = STOR_BINARY; # only binary currently works
uploadMode = UPLOAD_MODIFIED;
debug = True;
##########################################################
import os;
from datetime import datetime, timedelta;
from ftplib import FTP, FTP_TLS, error_reply, error_temp, error_perm, error_proto, all_errors;
if remoteTLS:
import ssl;
ftp = None;
dLog = None;
# === FTP Functions ===
def stor(dirpath, file):
"""Store the file obj to the dirpath of server."""
ext = (os.path.splitext(file.name())[1]).lstrip('.');
storpath = remoteJoin(dirpath, file.name());
try:
if (storMode == STOR_ASCII) or (storMode == STOR_AUTO and ext in asciiExt):
# Store in ASCII mode
if verbose: print("[asc] ", end="");
ftp.storlines("STOR %s" % storpath, open(file.path));
else:
# Store in binary mode
if verbose: print("[bin] ", end="");
ftp.storbinary("STOR %s" % storpath, open(file.path, "rb"));
setModified(dirpath, file);
if verbose: print("Uploaded: %s -> %s" % (file.path, storpath));
except OSError as oserror:
print("Failed Upload: %s\n %s" % (file.path, oserror));
def setModified(dirpath, file):
"""Attempts to set the modified time with MFMT."""
ftp.voidcmd("MFMT %s %s" % (file.getModified(), remoteJoin(dirpath, file.name())));
def rm(dirpath, file):
"""Delete the file at the path from the server."""
p = remoteJoin(dirpath, file.name());
_rm(p);
if verbose: print("Deleted: %s" % p);
def _rmDir(dirpath):
"""Delete directory with name from the current working directory.
Only deletes empty directories."""
ftp.rmd(dirpath); # TODO: What if fails to delete?
def _rmDirR(dirpath):
"""Remove the directory at dirpath and its contents (recursive)."""
try:
dirs, files = listRemote(dirpath);
for f in files:
_rm(f.path);
for d in dirs:
_rmDirR(d.path);
_rmDir(d.path);
except:
raise error_temp("451 Can't remove directory");
# === End FTP Functions ===
# === Traversal Functions ===
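# The traversal helpers were stripped from this copy; the sketches below are
# reconstructed from how the rest of the script calls them. listRemote()
# relies on MLSD (hence the python 3.3+ requirement noted above) and both
# listers return (dirs, files) as File objects from the Structures section.
def remoteJoin(base, name):
    if base.endswith(remoteSep):
        return base + name;
    return base + remoteSep + name;
def listRemote(dirpath):
    dirs = [];
    files = [];
    for name, facts in ftp.mlsd(dirpath):
        if name in (".", ".."):
            continue;
        entry = File(remoteJoin(dirpath, name), facts.get("modify", ""));
        if facts.get("type") == "dir":
            dirs.append(entry);
        else:
            files.append(entry);
    return dirs, files;
def listLocal(dirpath):
    dirs = [];
    files = [];
    for e in os.scandir(dirpath):
        modified = datetime.fromtimestamp(e.stat().st_mtime).strftime("%Y%m%d%H%M%S");
        (dirs if e.is_dir() else files).append(File(e.path, modified));
    return dirs, files;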
# === End Traversal Functions ===
# === Structures ===
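# The File structure was stripped from this copy. This reconstruction follows
# its usage elsewhere: .path, .modified (a YYYYMMDDHHMMSS string matching
# MLSD facts and the MFMT command), equality by name, ordering by mtime.
class File:
    def __init__(self, path, modified):
        self.path = path;
        self.modified = modified;
    def name(self):
        return self.path.replace("\\", remoteSep).rstrip(remoteSep).rsplit(remoteSep, 1)[-1];
    def getModified(self):
        return self.modified;
    def __eq__(self, other):
        return self.name() == other.name();
    def __gt__(self, other):
        return self.modified > other.modified;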
# === End Structures ===
def compareFiles(localList, remoteList, checkDeleted = True):
"""Compares localList with remoteList gets the tuple containing File objects:
(new, modified, unmodified, deleted)
new: Files that are in localList but not in remoteList.
modified: Files that are newer in localList than remoteList.
unmodified: Files that are the same in both lists.
deleted: Files that are in the remoteList but not in localList.
*newer is defined by the file's date modified attribute.
New, Modified and Unmodified will contain local files objects that need to
be uploaded to the remote location.
Deleted will contain remote file objects that need to be deleted from
the remote location."""
new = [];
modified = [];
unmodified = [];
deleted = [];
dprint("COMPARE FILES");
for lfile in localList:
dprint("LOCAL: %s - %s" % (lfile.path, lfile.modified));
existsInRemote = False;
for rfile in remoteList:
if lfile == rfile:
dprint("REMOTE: %s - %s" % (rfile.path, rfile.modified));
existsInRemote = True;
if uploadMode == UPLOAD_OVERWRITE or lfile > rfile:
dprint("Upload Mode: %s | Modified: lfile > rfile" % uploadMode);
modified.append(lfile);
else:
dprint("Not Modified: lfile <= rfile");
unmodified.append(lfile);
break;
if not existsInRemote:
dprint("New local file");
new.append(lfile);
dprint("--------------------------------------");
# Check for deleted files
if checkDeleted:
dprint("CHECK FOR DELETED FILES");
for rfile in remoteList:
existsInLocal = False;
for lfile in localList:
if rfile == lfile:
existsInLocal = True;
break;
if not existsInLocal and not rfile.path in deleteIgnoreFiles:
dprint("DELETED: %s" % rfile.path);
deleted.append(rfile);
dprint("--------------------------------------");
return (new, modified, unmodified, deleted);
def compareDirs(localList, remoteList, checkDeleted = True):
"""Compares localList with remoteList gets the tuple containing string
names of the directories: (new, existing, deleted)
new: Directories that are in localList but not in remoteList.
existing: Directories that are in both lists.
deleted: Directories that are in the remoteList but not in localList.
localList - list of strings of the directory names in the local location.
remoteList - list of strings of the directory name in the remote location."""
new = [];
existing = [];
deleted = [];
dprint("COMPARE DIRECTORIES");
for ldir in localList:
dprint("LOCAL DIR: %s"%ldir.path);
existsInRemote = False;
for rdir in remoteList:
if ldir == rdir:
dprint("REMOTE DIR: %s"%rdir.path);
dprint("Exists On Local and Remote");
existsInRemote = True;
existing.append(ldir)
break;
if not existsInRemote:
dprint("New Local Directory");
new.append(ldir);
# Check for deleted directories
if checkDeleted:
dprint("CHECK FOR DELETED DIRECTORIES");
for rdir in remoteList:
existsInLocal = False;
for ldir in localList:
if rdir == ldir:
existsInLocal = True;
break;
if not existsInLocal and not rdir.path in deleteIgnoreDirs:
dprint("DELETED: %s" % rdir.path);
deleted.append(rdir);
dprint("--------------------------------------");
return (new, existing, deleted);
def dprint(line, end="\n"):
global dLog;
if debug:
        if dLog is None:
if os.path.exists(dLogName):
os.remove(dLogName);
dLog = open(dLogName, "w")
dLog.write(line + end);
if __name__ == "__main__":
main();
| 35.565789 | 129 | 0.592379 |
39972511fba92d415fe55b1c71b33e08a7f6d99e
| 6,079 |
py
|
Python
|
pythorn/data_structures/queue.py
|
Gourav-KP/pythorn
|
f7130721c02292af0e23bd8bcf31d41990c0d48b
|
[
"MIT"
] | 5 |
2020-11-23T14:10:28.000Z
|
2021-05-07T16:25:38.000Z
|
pythorn/data_structures/queue.py
|
Gourav-KP/pythorn
|
f7130721c02292af0e23bd8bcf31d41990c0d48b
|
[
"MIT"
] | null | null | null |
pythorn/data_structures/queue.py
|
Gourav-KP/pythorn
|
f7130721c02292af0e23bd8bcf31d41990c0d48b
|
[
"MIT"
] | 3 |
2020-11-25T11:00:14.000Z
|
2021-10-01T12:16:30.000Z
|
"""
Author : Robin Singh
Programs List:
1.Queue
2.Circular Queue
3.Double Ended Queue
"""
import inspect
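# The class bodies listed in the docstring are not included in this extract;
# a minimal list-backed Queue sketch (assumed shape, for illustration only):
class Queue:
    """Simple FIFO queue backed by a Python list."""
    def __init__(self):
        self._items = []
    def enqueue(self, item):
        # Add an item at the rear of the queue.
        self._items.append(item)
    def dequeue(self):
        # Remove and return the front item; raises IndexError when empty.
        return self._items.pop(0)
    def is_empty(self):
        return not self._items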
| 22.853383 | 204 | 0.497779 |
3997e398937ee03af443d926f755e2d9046ee9c6
| 1,740 |
py
|
Python
|
wataru/commands/models/project.py
|
risuoku/wataru
|
63be36d15454abd0636f67eaf1e80728b8c5a9bd
|
[
"MIT"
] | null | null | null |
wataru/commands/models/project.py
|
risuoku/wataru
|
63be36d15454abd0636f67eaf1e80728b8c5a9bd
|
[
"MIT"
] | null | null | null |
wataru/commands/models/project.py
|
risuoku/wataru
|
63be36d15454abd0636f67eaf1e80728b8c5a9bd
|
[
"MIT"
] | null | null | null |
from wataru.commands.models.base import CommandBase
from wataru.logging import getLogger
import wataru.rules.models as rmodels
import os
import sys
logger = getLogger(__name__)
| 32.222222 | 114 | 0.65977 |
3998894acc2c2f5b50a8cd1451c55bffb80880f7
| 2,914 |
py
|
Python
|
UnityExamples/Assets/StreamingAssets/Python/BlockLibraries/UnityExamples/FingerTrace.py
|
6henrykim/UnityExamples
|
3d4d782e6e67fee1ede902998c2df1b5b90b074a
|
[
"Apache-2.0"
] | 9 |
2020-04-02T10:33:37.000Z
|
2021-12-03T17:14:40.000Z
|
UnityExamples/Assets/StreamingAssets/Python/BlockLibraries/UnityExamples/FingerTrace.py
|
ultrahaptics/UnityExamples
|
3d4d782e6e67fee1ede902998c2df1b5b90b074a
|
[
"Apache-2.0"
] | 2 |
2019-11-06T10:37:18.000Z
|
2021-09-20T14:31:13.000Z
|
UnityExamples/Assets/StreamingAssets/Python/BlockLibraries/UnityExamples/FingerTrace.py
|
ultrahaptics/UnityExamples
|
3d4d782e6e67fee1ede902998c2df1b5b90b074a
|
[
"Apache-2.0"
] | 1 |
2022-02-25T16:38:52.000Z
|
2022-02-25T16:38:52.000Z
|
# A Sensation which creates a Polyline of 35 points of the finger joints, along which a Circle Path is animated.
from pysensationcore import *
import sensation_helpers as sh
import HandOperations
# We will use the joint positions of the fingers to animate a Circle along a PolylinePath
fingers = ["thumb", "indexFinger", "middleFinger", "ringFinger", "pinkyFinger"]
bones = ["metacarpal", "proximal", "intermediate", "distal", "intermediate","proximal","metacarpal"]
jointKeyFrames = []
# Create a Polyline Path for each Animation Step
animPath = createInstance("PolylinePath", "PolylinePathInstance")
# Create inputs for each of the Bone joints
for finger in fingers:
for bone in bones:
jointInputName = "%s_%s_position" % (finger, bone)
jointKeyFrames+=[jointInputName]
# The number of Key frames
numPoints = len(jointKeyFrames)
points = sh.createList(numPoints)
# Connect the points list for our Polylinepath to the animation path
connect(points["output"], animPath.points)
translateAlongPath = createInstance("TranslateAlongPath", "translateAlongPath")
connect(Constant((1,0,0)), translateAlongPath.direction)
connect(animPath.out, translateAlongPath.animationPath)
# The Object Path (a circle) Will trace along the animation Path
# On top of its translation along the path, we apply a rotation transform,
# to match the orientation of the Palm
circlePath = createInstance("CirclePath", "objectPath")
orientToPalmInstance = createInstance("OrientPathToPalm", "orientToPalm")
# Object Path -> OrientPathToPalm -> TranslateAlongPath
connect(circlePath.out, orientToPalmInstance.path)
connect(orientToPalmInstance.out, translateAlongPath.objectPath)
topLevelInputs = {}
for n in range(0,numPoints):
topLevelInputs[(jointKeyFrames[n], points["inputs"][n])] = (0,0,0)
topLevelInputs[("t", translateAlongPath.t)] = (0, 0, 0)
topLevelInputs[("duration", translateAlongPath.duration)] = (2.5,0,0)
topLevelInputs[("dotSize", circlePath.radius)] = (0.01, 0, 0)
topLevelInputs[("palm_direction", orientToPalmInstance.palm_direction)] = (0, 0, 0)
topLevelInputs[("palm_normal", orientToPalmInstance.palm_normal)] = (0, 0, 0)
fingerScan = sh.createSensationFromPath("Finger Trace",
topLevelInputs,
output = translateAlongPath.out,
drawFrequency = 120,
renderMode=sh.RenderMode.Loop,
definedInVirtualSpace = True
)
# Hide the non-vital inputs...
visibleInputs = ("duration", "dotSize")
for topLevelInput in topLevelInputs.keys():
inputName = topLevelInput[0]
if inputName not in visibleInputs:
setMetaData(getattr(fingerScan, inputName), "Input-Visibility", False)
setMetaData(fingerScan.duration, "Type", "Scalar")
setMetaData(fingerScan.dotSize, "Type", "Scalar")
| 42.231884 | 112 | 0.710707 |
3998e8576c81d8620613973a3fcb28ca0f349137
| 2,053 |
py
|
Python
|
scripts/extarct_from_videos.py
|
corenel/yt8m-feature-extractor
|
3f658749fd365478f1f26daa78b3e7b8d4844047
|
[
"MIT"
] | 18 |
2017-09-12T07:02:28.000Z
|
2021-06-07T13:38:51.000Z
|
scripts/extarct_from_videos.py
|
corenel/yt8m-feature-extractor
|
3f658749fd365478f1f26daa78b3e7b8d4844047
|
[
"MIT"
] | 1 |
2017-10-19T13:51:41.000Z
|
2017-12-30T08:49:08.000Z
|
scripts/extarct_from_videos.py
|
corenel/yt8m-feature-extractor
|
3f658749fd365478f1f26daa78b3e7b8d4844047
|
[
"MIT"
] | 3 |
2017-09-07T07:07:22.000Z
|
2018-09-18T15:49:29.000Z
|
"""Extract inception_v3_feats from videos for Youtube-8M feature extractor."""
import os
import torch
import init_path
import misc.config as cfg
from misc.utils import (concat_feat_var, get_dataloader, make_cuda,
make_variable)
from models import inception_v3
if __name__ == '__main__':
# init models and data loader
model = make_cuda(inception_v3(pretrained=True,
transform_input=True,
extract_feat=True))
model.eval()
# get vid list
video_list = os.listdir(cfg.video_root)
video_list = [v for v in video_list
if os.path.splitext(v)[1] in cfg.video_ext]
# extract features by inception_v3
for idx, video_file in enumerate(video_list):
vid = os.path.splitext(video_file)[0]
filepath = os.path.join(cfg.video_root, video_file)
if os.path.exists(cfg.inception_v3_feats_path.format(vid)):
print("skip {}".format(vid))
else:
print("processing {}".format(vid))
# data loader for frames in single video
data_loader = get_dataloader(dataset="VideoFrame",
path=filepath,
num_frames=cfg.num_frames,
batch_size=cfg.batch_size)
# extract features by inception_v3
feats = None
for step, frames in enumerate(data_loader):
print("--> extract features [{}/{}]".format(step + 1,
len(data_loader)))
feat = model(make_variable(frames))
feats = concat_feat_var(feats, feat.data.cpu())
print("--> save feats to {}"
.format(cfg.inception_v3_feats_path.format(vid)))
torch.save(feats, cfg.inception_v3_feats_path.format(vid))
# print("--> delete original video file: {}".format(filepath))
# os.remove(filepath)
| 40.254902 | 78 | 0.560156 |
399fd36bf8e08b05046794370fe69a0ebbb1e2b1
| 4,208 |
py
|
Python
|
wc_rules/simulator/simulator.py
|
KarrLab/wc_rules
|
5c6d8ec7f3152f2d234107d6fec3e2bc8d9ff518
|
[
"MIT"
] | 5 |
2018-12-24T16:20:27.000Z
|
2022-02-12T23:07:42.000Z
|
wc_rules/simulator/simulator.py
|
KarrLab/wc_rules
|
5c6d8ec7f3152f2d234107d6fec3e2bc8d9ff518
|
[
"MIT"
] | 7 |
2019-01-14T23:08:52.000Z
|
2021-06-03T02:38:43.000Z
|
wc_rules/simulator/simulator.py
|
KarrLab/wc_rules
|
5c6d8ec7f3152f2d234107d6fec3e2bc8d9ff518
|
[
"MIT"
] | 3 |
2018-12-15T00:51:56.000Z
|
2020-04-29T14:12:34.000Z
|
from collections import deque
from ..utils.collections import DictLike
from ..matcher.core import ReteNet
from ..matcher.actions import make_node_token, make_edge_token, make_attr_token
from .sampler import NextReactionMethod
| 34.491803 | 140 | 0.736217 |
39a05a3ae20bd7b9b573cc3402d91e45b4b3aa9a
| 594 |
py
|
Python
|
samples/module_snapcheck.py
|
luislezcair/jsnapy
|
86381aa389cf19394a6165fe34bcfd95ee8a7f67
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 101 |
2016-07-04T13:18:48.000Z
|
2022-02-11T19:18:15.000Z
|
samples/module_snapcheck.py
|
luislezcair/jsnapy
|
86381aa389cf19394a6165fe34bcfd95ee8a7f67
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 187 |
2016-07-06T14:58:03.000Z
|
2022-03-15T09:19:11.000Z
|
samples/module_snapcheck.py
|
luislezcair/jsnapy
|
86381aa389cf19394a6165fe34bcfd95ee8a7f67
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 70 |
2016-07-12T15:20:58.000Z
|
2022-03-25T05:14:40.000Z
|
### performing function similar to --snapcheck option in command line ######
from jnpr.jsnapy import SnapAdmin
from pprint import pprint
from jnpr.junos import Device
js = SnapAdmin()
config_file = "/etc/jsnapy/testfiles/config_single_snapcheck.yml"
snapvalue = js.snapcheck(config_file, "snap")
for snapcheck in snapvalue:
print "\n -----------snapcheck----------"
print "Tested on", snapcheck.device
print "Final result: ", snapcheck.result
print "Total passed: ", snapcheck.no_passed
print "Total failed:", snapcheck.no_failed
pprint(dict(snapcheck.test_details))
| 33 | 76 | 0.720539 |
39a0dad5efbaf0ea7f66987d69ed3575a2e7b7d0
| 1,068 |
py
|
Python
|
python/easy/1342_Number_of_Steps_to_Reduce_a_Number_to_Zero.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | 1 |
2021-05-22T03:27:33.000Z
|
2021-05-22T03:27:33.000Z
|
python/easy/1342_Number_of_Steps_to_Reduce_a_Number_to_Zero.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | null | null | null |
python/easy/1342_Number_of_Steps_to_Reduce_a_Number_to_Zero.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | null | null | null |
from typing import *
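# The Solution class is not included in this extract; a minimal sketch of the
# standard approach for LeetCode 1342 (halve when even, subtract one when odd):
class Solution:
    def numberOfSteps(self, num: int) -> int:
        steps = 0
        while num:
            num = num // 2 if num % 2 == 0 else num - 1
            steps += 1
        return steps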
if __name__ == "__main__":
so = Solution()
print(so.numberOfSteps(123))
| 34.451613 | 110 | 0.553371 |
39a16a05ac36a9db042c0bce00dc04a5a657ef37
| 1,370 |
py
|
Python
|
__private__/temp_dev/testshapefile.py
|
karimbahgat/PyA
|
4d62a0850ba1dca93f7362ef23e18a13938fce4f
|
[
"MIT"
] | 16 |
2016-02-26T15:24:28.000Z
|
2021-06-16T21:00:22.000Z
|
__private__/temp_dev/testshapefile.py
|
karimbahgat/PyA
|
4d62a0850ba1dca93f7362ef23e18a13938fce4f
|
[
"MIT"
] | 5 |
2016-02-27T20:13:26.000Z
|
2018-09-12T23:08:36.000Z
|
__private__/temp_dev/testshapefile.py
|
karimbahgat/PyA
|
4d62a0850ba1dca93f7362ef23e18a13938fce4f
|
[
"MIT"
] | 7 |
2015-07-08T12:51:57.000Z
|
2019-12-05T19:07:27.000Z
|
import Tkinter as tk
from PIL import Image, ImageTk
import aggdraw
window = tk.Tk()
label = tk.Label(window)
label.pack()
# schedule changing images
import itertools, random, time
# Begin #
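# The helpers used below are not included in this extract; minimal sketches of
# what the calls appear to expect (names and behaviour are assumptions):
def random_n(lo, hi, n=3):
    # n random integers in [lo, hi], e.g. an RGB colour tuple.
    return tuple(random.randint(lo, hi) for _ in range(n))
def draw_polygon(img, flatcoords):
    # Fill a polygon given a flat [x1, y1, x2, y2, ...] coordinate list.
    img.polygon(flatcoords, aggdraw.Brush(random_n(0, 255, n=3)))
def update(img):
    # Render the aggdraw canvas into the Tkinter label.
    pil_img = Image.frombytes(img.mode, img.size, img.tostring())
    photo = ImageTk.PhotoImage(pil_img)
    label.config(image=photo)
    label.image = photo  # keep a reference so it is not garbage collected
    window.update()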
img = aggdraw.Draw("RGBA", (1000,600), random_n(0,222,n=3) )
import geovis
sf = geovis.shapefile_fork.Reader("D:/Test Data/cshapes/cshapes.shp")
for shape in sf.iterShapes():
if shape.__geo_interface__["type"] == "Polygon":
flatcoords = [xory+350 for xy in shape.__geo_interface__["coordinates"][0] for xory in xy]
draw_polygon(img, flatcoords)
update(img)
window.mainloop()
| 22.096774 | 98 | 0.674453 |
39a902062ca7512880d1818276ec6c8f4ed11b57
| 693 |
py
|
Python
|
aoc10.py
|
roscroft/aoc-2020
|
3f37f6b29ec66bac5610bccd6de5ebb000bde312
|
[
"MIT"
] | 1 |
2020-12-07T22:16:17.000Z
|
2020-12-07T22:16:17.000Z
|
aoc10.py
|
roscroft/aoc-2020
|
3f37f6b29ec66bac5610bccd6de5ebb000bde312
|
[
"MIT"
] | null | null | null |
aoc10.py
|
roscroft/aoc-2020
|
3f37f6b29ec66bac5610bccd6de5ebb000bde312
|
[
"MIT"
] | null | null | null |
from utils import utils
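# part_1/part_2 are not included in this extract; minimal sketches based on
# the Advent of Code 2020 day 10 problem statement:
def part_1(data):
    # Multiply the counts of 1-jolt and 3-jolt gaps between adapters.
    diffs = [b - a for a, b in zip(data, data[1:])]
    return diffs.count(1) * diffs.count(3)
def part_2(data):
    # Count distinct adapter arrangements with a running tally per joltage.
    ways = {data[0]: 1}
    for j in data[1:]:
        ways[j] = ways.get(j - 1, 0) + ways.get(j - 2, 0) + ways.get(j - 3, 0)
    return ways[data[-1]]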
if __name__ == "__main__":
day = 10
data = utils.get_ints_from_file(f"data/aoc{day}_data.txt")
data = sorted(data)
data = [0] + data + [data[-1]+3]
print(f"Part 1 solution: {part_1(data)}")
print(f"Part 2 solution: {part_2(data)}")
| 34.65 | 90 | 0.588745 |
39a92e95003cf25b12c9d62aa465b8c0ddd75afb
| 5,510 |
py
|
Python
|
HyperGui.py
|
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
0ee8e0da85049076bb22a542d15d6c3adf6ea106
|
[
"MIT"
] | null | null | null |
HyperGui.py
|
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
0ee8e0da85049076bb22a542d15d6c3adf6ea106
|
[
"MIT"
] | null | null | null |
HyperGui.py
|
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
0ee8e0da85049076bb22a542d15d6c3adf6ea106
|
[
"MIT"
] | null | null | null |
"""
@author: Alexander Studier-Fischer, Jan Odenthal, Berkin Oezdemir, Isabella Camplisson, University of Heidelberg
"""
from HyperGuiModules import *
import logging
import os
#logging.basicConfig(level=logging.DEBUG)
xSize=None
ySize=None
if __name__ == '__main__':
main()
| 36.979866 | 162 | 0.741561 |
39a9bf645816b1c506dcc188750ce0f86697bf35
| 241 |
py
|
Python
|
8. The Prisoner.py
|
Zfauser/Code-Combat-Introductory-To-Computer-Science-Python-Answers
|
231d17ad2224fc616c022b515bc14e78ec5822f9
|
[
"MIT"
] | 1 |
2021-02-25T16:43:08.000Z
|
2021-02-25T16:43:08.000Z
|
8. The Prisoner.py
|
Zfauser/Code-Combat-Introductory-To-Computer-Science-Python-Answers
|
231d17ad2224fc616c022b515bc14e78ec5822f9
|
[
"MIT"
] | null | null | null |
8. The Prisoner.py
|
Zfauser/Code-Combat-Introductory-To-Computer-Science-Python-Answers
|
231d17ad2224fc616c022b515bc14e78ec5822f9
|
[
"MIT"
] | null | null | null |
# Free the prisoner, defeat the guard and grab the gem.
hero.moveRight()
# Free Patrick from behind the "Weak Door".
hero.attack("Weak Door")
hero.moveRight(2)
# Defeat the guard, named "Two".
# Get the gem.
hero.moveRight()
hero.moveDown(3)
| 26.777778 | 55 | 0.73029 |
39ab4f35e7e866e763852b3e23d066d864569549
| 1,120 |
py
|
Python
|
conti_wc.py
|
saturn99/cleaks
|
c826c973d9695c3bfc31bf580b470267792807e7
|
[
"MIT"
] | 6 |
2022-03-01T10:33:52.000Z
|
2022-03-05T22:26:27.000Z
|
conti_wc.py
|
saturn99/cleaks
|
c826c973d9695c3bfc31bf580b470267792807e7
|
[
"MIT"
] | 1 |
2022-03-01T13:40:29.000Z
|
2022-03-01T13:40:29.000Z
|
conti_wc.py
|
saturn99/cleaks
|
c826c973d9695c3bfc31bf580b470267792807e7
|
[
"MIT"
] | 2 |
2022-03-01T10:40:57.000Z
|
2022-03-01T13:21:23.000Z
|
# -*- coding: utf-8 -*-
# import libraries
import os
from PIL import Image
import nltk
import numpy as np
import matplotlib.pyplot as plt
import random
from scipy.ndimage import gaussian_gradient_magnitude
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
# import mask image. Search for stencil image for better results
mask = np.array(Image.open("darthvader01.png"))
# define function for grayscale coloring
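# The original definition is not included in this extract; a common grayscale
# color_func sketch for wordcloud (random grey tones in HSL space):
def grey_color_func(word, font_size, position, orientation, random_state=None,
                    **kwargs):
    return "hsl(0, 0%%, %d%%)" % random.randint(60, 100)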
# Load and text and decode
text = open(('conti_just_body.txt'), "rb").read().decode('UTF-8', errors='replace')
# Load stopwords for EN language from nlkt
stopwords = nltk.corpus.stopwords.words('english')
# Create Worldcloud
wc = WordCloud(max_words=100000, width=1596, height=584, stopwords=stopwords, mask=mask).generate(text)
# Recolor our Wordcloud
plt.imshow(wc.recolor(color_func=grey_color_func, random_state=3),
interpolation="bilinear")
# Save worldcloud file
wc.to_file("CONTI_Darth.png")
| 25.454545 | 103 | 0.738393 |
39ab88cab3f3527e44f2aa4992feac019e41f3f0
| 2,120 |
py
|
Python
|
PA2_Optical_Flow.py
|
tianzixie/CAP5415PA2
|
6a7f4b1f178f10b37d588e698eddd013ce193544
|
[
"MIT"
] | null | null | null |
PA2_Optical_Flow.py
|
tianzixie/CAP5415PA2
|
6a7f4b1f178f10b37d588e698eddd013ce193544
|
[
"MIT"
] | null | null | null |
PA2_Optical_Flow.py
|
tianzixie/CAP5415PA2
|
6a7f4b1f178f10b37d588e698eddd013ce193544
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 26 08:19:16 2017
@author: 0
"""
from scipy.misc import imresize
from scipy.signal import convolve,convolve2d
import scipy
from PIL import Image
import cv2
import numpy as np
img = cv2.imread("C://Users/0/Downloads/basketball1.png",0)
img2 = cv2.imread("C://Users/0/Downloads/basketball2.png",0)
#cv2.imshow('img',img)
#cv2.imshow('img2',img2)
k=(3,3)
print(img)
img = cv2.GaussianBlur(img, k, 1.5)
img2 = cv2.GaussianBlur(img2, k, 1.5)
cv2.imshow('img3',img)
#cv2.waitKey(10000)
cv2.destroyAllWindows()
imga=np.matrix(img)
imga2=np.matrix(img2)
#print imga
#img=Image.fromarray(imga)
#img.show()
height,width = imga.shape
#for x in range img(x,0):
print(imga.shape)
print(height, width)
# print x
#for y in height:
# for x in width:
# print '0'
#for y in range(height):
print(imga)
#imga[0,1]=imga[0,1]+1
#print imga
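# fx/fy are not defined in this extract; minimal forward-difference sketches
# over the image matrix `imga` (assumed intent, given the gradient code below):
def fx(y, x):
    return int(imga[y, x + 1]) - int(imga[y, x])
def fy(y, x):
    return int(imga[y + 1, x]) - int(imga[y, x])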
print(fx(1, 0), fy(0, 4))
imga=imresize(imga,(240,320))
imga2=imresize(imga2,(240,320))
print(imga, imga.shape, imga2, imga2.shape)
u=np.zeros([240,320])
v=np.zeros([240,320])
w2=30
w=15
#for i in range(w2):
# for y in range(w2):
#
#
# print matrix
#matrix=np.zeros([w2,w2])
#
#for x in range(w,240-w):
#
# for y in range(w,320-w):
# c=0
## matrix[w,w]=x
# print x,y
#print matrix
#def conv2(x, y, mode='same'):
# return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)
#print convolve2d(imga2,matrix,'valid')
'''
ft = scipy.signal.convolve2d(imga, 0.25 * np.ones((2,2))) + \
scipy.signal.convolve2d(imga2, -0.25 * np.ones((2,2)))
#print ft
fx,fy=np.gradient(cv2.GaussianBlur(img, k, 1.5))
fx = fx[0:478, 0:638]
fy = fy[0:478, 0:638]
ft = ft[0:478, 0:638]
#print fx,fy,ft
'''
'''
for i in range(w+1,480-w):
for j in range(w+1,640-w):
Ix = fx[i-w:i+w, j-w:j+w]
Iy = fy[i-w:i+w, j-w:j+w]
It = ft[i-w:i+w, j-w:j+w]
A = [Ix,Iy]
print fx,fy,ft
'''
#C=A.T*-It
#print C
#print curFx,curFy,curFt,U[0],U[1]
| 20.784314 | 78 | 0.618868 |
39ab9e369da24d4871a1bbc5c6f073cf0d4fed1f
| 743 |
py
|
Python
|
Test_data/database.py
|
mayowak/SQLite_test
|
a1185650dffe360d033e0691567ec2b2e075cae5
|
[
"MIT"
] | null | null | null |
Test_data/database.py
|
mayowak/SQLite_test
|
a1185650dffe360d033e0691567ec2b2e075cae5
|
[
"MIT"
] | null | null | null |
Test_data/database.py
|
mayowak/SQLite_test
|
a1185650dffe360d033e0691567ec2b2e075cae5
|
[
"MIT"
] | null | null | null |
#!usr/bin/env python3
#import dependecies
import sqlite3
import csv
#connect to test_data
conn = sqlite3.connect('test_data.db')
#create a cursor
c = conn.cursor()
c.execute("DROP TABLE test_data")
#create a test_data table
c.execute("""CREATE TABLE test_data(age integer,
sex text,
bmi real,
children integer,
smoker text,
region text)""")
#get test_data file
get_file = open('test_data.csv')
#read test_data file
read_file = csv.reader(get_file)
c.executemany("INSERT INTO test_data VALUES (?, ?, ?, ?, ?, ?,?)", read_file)
conn.commit()
conn.close()
| 22.515152 | 78 | 0.549125 |
39abb2ca3dacb04c99f9108d126a09ef92f5c7d4
| 1,824 |
py
|
Python
|
swift_cloud_py/validate_safety_restrictions/validate.py
|
stijnfleuren/swift_cloud_api
|
30f3b6c1fd80e5cfa5ce11e1daa08a09ab1e4e9b
|
[
"MIT"
] | 3 |
2021-05-25T18:29:38.000Z
|
2021-08-03T17:04:29.000Z
|
swift_cloud_py/validate_safety_restrictions/validate.py
|
stijnfleuren/swift_cloud_api
|
30f3b6c1fd80e5cfa5ce11e1daa08a09ab1e4e9b
|
[
"MIT"
] | null | null | null |
swift_cloud_py/validate_safety_restrictions/validate.py
|
stijnfleuren/swift_cloud_api
|
30f3b6c1fd80e5cfa5ce11e1daa08a09ab1e4e9b
|
[
"MIT"
] | null | null | null |
from swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule
from swift_cloud_py.entities.intersection.intersection import Intersection
from swift_cloud_py.validate_safety_restrictions.validate_bounds import validate_bounds
from swift_cloud_py.validate_safety_restrictions.validate_completeness import validate_completeness
from swift_cloud_py.validate_safety_restrictions.validate_conflicts import validate_conflicts
from swift_cloud_py.validate_safety_restrictions.validate_fixed_orders import validate_fixed_orders
from swift_cloud_py.validate_safety_restrictions.validate_other_sg_relations import validate_other_sg_relations
def validate_safety_restrictions(intersection: Intersection, fixed_time_schedule: FixedTimeSchedule,
tolerance: float = 10**(-2)) -> None:
"""
Check if the fixed-time schedule satisfies the safety restrictions such as bounds on greenyellow times
and bounds on red times.
:param intersection: intersection object (this object also contains safety restrictions that a
fixed-time schedule should satisfy)
:param fixed_time_schedule: the schedule that we would like to validate
:param tolerance: tolerance in seconds for violating safety restrictions
This method raises a SafetyViolation-exception if the safety restrictions are not satisfied.
"""
validate_bounds(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)
validate_conflicts(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)
validate_other_sg_relations(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)
validate_completeness(intersection=intersection, fts=fixed_time_schedule)
validate_fixed_orders(intersection=intersection, fts=fixed_time_schedule)
| 67.555556 | 111 | 0.838268 |
39ac7cdc9dcc48e4f5e6e8db36ab648730a99cc2
| 20,366 |
py
|
Python
|
source/python/brick_characterizer/CellRiseFall_Char.py
|
electronicvisions/brick
|
9ad14f9d2912e70191f4711f359e3912c8cef837
|
[
"BSD-3-Clause"
] | 1 |
2016-08-02T15:23:16.000Z
|
2016-08-02T15:23:16.000Z
|
source/python/brick_characterizer/CellRiseFall_Char.py
|
ahartel/brick
|
9ad14f9d2912e70191f4711f359e3912c8cef837
|
[
"BSD-3-Clause"
] | null | null | null |
source/python/brick_characterizer/CellRiseFall_Char.py
|
ahartel/brick
|
9ad14f9d2912e70191f4711f359e3912c8cef837
|
[
"BSD-3-Clause"
] | 1 |
2016-05-27T21:22:14.000Z
|
2016-05-27T21:22:14.000Z
|
from timingsignal import TimingSignal
from brick_characterizer.CharBase import CharBase
| 50.78803 | 402 | 0.584847 |
39ad13fb0f9312898dcd01e19fe49f2a734c1783
| 58 |
py
|
Python
|
pyjpboatrace/utils/__init__.py
|
miyamamoto/pyjpboatrace
|
fbc4a794d1f03e2ed7dfcafcb20c43098c1434a6
|
[
"MIT"
] | null | null | null |
pyjpboatrace/utils/__init__.py
|
miyamamoto/pyjpboatrace
|
fbc4a794d1f03e2ed7dfcafcb20c43098c1434a6
|
[
"MIT"
] | null | null | null |
pyjpboatrace/utils/__init__.py
|
miyamamoto/pyjpboatrace
|
fbc4a794d1f03e2ed7dfcafcb20c43098c1434a6
|
[
"MIT"
] | null | null | null |
from .str2num import str2num
__all__ = [
'str2num'
]
| 9.666667 | 28 | 0.655172 |
39ae3c36550302817294c61764f3350d2f47cf3d
| 2,168 |
py
|
Python
|
snippets/integers.py
|
rhishi/python-snippets
|
60020d3a187d7687b38b6b58f74ceb03a37983b9
|
[
"Apache-2.0"
] | null | null | null |
snippets/integers.py
|
rhishi/python-snippets
|
60020d3a187d7687b38b6b58f74ceb03a37983b9
|
[
"Apache-2.0"
] | null | null | null |
snippets/integers.py
|
rhishi/python-snippets
|
60020d3a187d7687b38b6b58f74ceb03a37983b9
|
[
"Apache-2.0"
] | null | null | null |
import sys
# First: to understand the uses of "format" below, read these:
# Format String Syntax https://docs.python.org/2/library/string.html#formatstrings
# Format Specification Mini-Language https://docs.python.org/2/library/string.html#formatspec
# In Python 2, there are two integer types: int, long.
# int is the underlying platform's signed integer type,
# either 32 or 64 bit, depending on the platform.
print "2^31 - 1 = {0:20} = {0:17x} ".format((1 << 31) - 1)
print "2^63 - 1 = {0:20} = {0:17x} ".format((1 << 63) - 1)
# sys.maxint gives the maximum value of int. It is 2^31-1 or 2^63-1.
maxint = sys.maxint
print " max int = {0:20} = {0:17x} {1}".format(maxint, type(maxint))
# There is no sys.minint, but it's simply -sys.maxint-1 as said in Python documentation
# http://docs.python.org/2/library/stdtypes.html#numeric-types-int-float-long-complex
minint = -maxint - 1
print " min int = {0:20} = {0:17x} {1}".format(minint, type(minint))
print
# long is an integer type with unlimited range. Python automatically
# switches over from int to long whenever there is overflow.
# That's why, there is no sys.maxlong.
# Python 3 even gets rid of sys.maxint, because it has just single
# integer type: int. It actually behaves like 2's long i.e. has unlimited range.
# 3 has sys.maxsize, which loosely relates to 2's sys.maxint.
# http://docs.python.org/3.3/whatsnew/3.0.html#integers
# http://docs.python.org/3/library/stdtypes.html#numeric-types-int-float-complex
# Let's test the automatic switchover from int to long
# On 64-bit platform, the switchover point is between 2^63-1 and 2^63.
for r in [ range(1, 22), range(28, 37), range(53, 69), range(88, 100), range(123, 131) ]:
for i in r:
# make 2^i - 1, without spilling beyond i bits.
n = (((1 << (i-1)) - 1) << 1) + 1
# i is formatted as left-aligned ('<'), width 3.
# n is formatted as hex ('x') with 0x prefix ('#'), width 35.
print "2**{0:<3} - 1 = {1:#35x} {2}".format(i, n, type(n))
print " + 1 = {1:#35x} {2}".format(i, n+1, type(n+1))
print "..."
print
print -1
print -1 & 0xFF
print -1 & 0xFFF
| 38.714286 | 95 | 0.652675 |
39aefe4ed5c77eadc14e52071c40e7bf0197d590
| 332 |
py
|
Python
|
covid mail/main.py
|
rahul263-stack/PROJECT-Dump
|
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
|
[
"MIT"
] | 1 |
2020-04-06T04:41:56.000Z
|
2020-04-06T04:41:56.000Z
|
covid mail/main.py
|
rahul263-stack/quarantine
|
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
|
[
"MIT"
] | null | null | null |
covid mail/main.py
|
rahul263-stack/quarantine
|
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
|
[
"MIT"
] | null | null | null |
import os
from sendDetailedEmail.email import MailAttachment
if __name__=="__main__":
clientEmail = input("input a valid client email ID: ")
sendMail(clientEmail)
| 22.133333 | 58 | 0.698795 |
39af2956611d454e6abd79bee5b3ec4243b86cd1
| 2,933 |
py
|
Python
|
pyodide_importer/api.py
|
ryanking13/pyodide-importer
|
fb9f83e54eb307fcdb2590588f0b75db1c87ca97
|
[
"MIT"
] | 1 |
2021-11-16T11:55:54.000Z
|
2021-11-16T11:55:54.000Z
|
pyodide_importer/api.py
|
ryanking13/pyodide-importer
|
fb9f83e54eb307fcdb2590588f0b75db1c87ca97
|
[
"MIT"
] | null | null | null |
pyodide_importer/api.py
|
ryanking13/pyodide-importer
|
fb9f83e54eb307fcdb2590588f0b75db1c87ca97
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager
import pathlib
import sys
from typing import Union, List
from .import_hook import PyFinder, PyHTTPFinder
# Singleton instance of PyFinder
pyfinder: PyFinder = None
def _update_syspath(path: str):
"""
Append `path` to sys.path so that files in path can be imported
"""
path = pathlib.Path(path).resolve().as_posix()
if path not in sys.path:
sys.path.append(path)
def register_hook(
base_url: Union[str, List[str]],
download_path: str = "",
modules: List[str] = None,
update_syspath: bool = True,
):
"""
Register import hook to sys.meta_path.
Args:
base_url (str or List[str]): URL(s) where the directory containing Python packages is served through HTTP/S
download_path (str): the path in virtual file system where Python packages will be downloaded, default is current working directory
modules (List[str]): a list, with the names of the root modules/packages that can be imported from the given URL
update_syspath (bool): whether to add ``download_path`` to `sys.path`
    **Notes on the** ``modules`` **parameter**:
    If this parameter is not specified, an import statement will try to fetch a module from the URL
    every time the module is not found in the local filesystem, so every FAILED import statement
    will result in multiple 404 HTTP errors. When you have a fixed set of modules, using the
    ``modules`` parameter to whitelist downloadable modules is recommended.
"""
global pyfinder
if pyfinder is not None and pyfinder._registered():
raise RuntimeError(
"import hook is already registered, if you want to register a new hook, unregister the existing hook with unregister_hook() first"
)
pyfinder = PyHTTPFinder(base_url, download_path, modules)
pyfinder.register()
if update_syspath:
_update_syspath(download_path)
return pyfinder
def unregister_hook():
"""
Unregister import hook from sys.meta_path.
    After calling this method, new external modules can no longer be downloaded and imported,
    while previously imported modules remain available.
"""
global pyfinder
if pyfinder is not None:
pyfinder.unregister()
pyfinder = None
def add_module(module: Union[str, List[str]]):
"""
Add new module(s) that can be imported from URL.
Args:
module (str or List[str]): modules/packages that can be imported from the URL
"""
global pyfinder
if pyfinder is None or (not pyfinder._registered()):
raise RuntimeError("import hook is not registered")
pyfinder.add_module(module)
def available_modules():
"""
Get the list of modules that can be imported from the URL.
"""
global pyfinder
if pyfinder is None or (not pyfinder._registered()):
raise RuntimeError("import hook is not registered")
return pyfinder.available_modules()
| 31.880435 | 142 | 0.699284 |
39af8dcb80c383fcd4bfdd52b3cd4d36dce1df8f
| 1,982 |
py
|
Python
|
rastervision/new_version/batch_submit.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1 |
2019-11-07T10:02:23.000Z
|
2019-11-07T10:02:23.000Z
|
rastervision/new_version/batch_submit.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/new_version/batch_submit.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import uuid
import click
from rastervision.rv_config import RVConfig
if __name__ == '__main__':
batch_submit()
| 26.783784 | 79 | 0.589808 |
39b0985dcd907af2111c10e4b763175f9a26f8fe
| 311 |
py
|
Python
|
app/api/item.py
|
peterentroprise/entro-tad
|
b074d4810bcc7fb71b467da8dfaa19be66a41fa2
|
[
"MIT"
] | null | null | null |
app/api/item.py
|
peterentroprise/entro-tad
|
b074d4810bcc7fb71b467da8dfaa19be66a41fa2
|
[
"MIT"
] | null | null | null |
app/api/item.py
|
peterentroprise/entro-tad
|
b074d4810bcc7fb71b467da8dfaa19be66a41fa2
|
[
"MIT"
] | null | null | null |
from fastapi import APIRouter
from models.item_model import Payload
from service import item_service
router = APIRouter()
| 19.4375 | 43 | 0.752412 |
39b1dd9a2298bcc4fe7df8fe5dd5e695bcdaca18
| 6,867 |
py
|
Python
|
scripts/docker_configurator/docker_configurator.py
|
PlenusPyramis/dockerfiles
|
0c1b19faa33e944c66f3762fe49d7f954aa60b12
|
[
"MIT"
] | 1 |
2020-01-10T16:26:32.000Z
|
2020-01-10T16:26:32.000Z
|
scripts/docker_configurator/docker_configurator.py
|
PlenusPyramis/dockerfiles
|
0c1b19faa33e944c66f3762fe49d7f954aa60b12
|
[
"MIT"
] | null | null | null |
scripts/docker_configurator/docker_configurator.py
|
PlenusPyramis/dockerfiles
|
0c1b19faa33e944c66f3762fe49d7f954aa60b12
|
[
"MIT"
] | 2 |
2020-02-22T23:25:24.000Z
|
2020-11-04T05:09:48.000Z
|
"""
Docker Configurator
http://www.github.com/EnigmaCurry/docker-configurator
This tool creates self-configuring docker containers given a single
YAML file.
Run this script before your main docker CMD. It will write fresh
config files on every startup of the container, based off of Mako
templates embedded in the docker image, as well as values specified in
a YAML file provided in a mounted volume.
The idea of this is that container configuration is kind of hard
because everyone does it differently. This creates a standard way of
doing it for containers that I write. A single file to configure
everything.
See the included example project: `docker_configurator_example`
---------------------------------------------------------------------------
Copyright (c) 2019 PlenusPyramis
Copyright (c) 2015 Ryan McGuire
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import yaml
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions as mako_exceptions
import logging
import argparse
import os
import shutil
import collections
logging.basicConfig(level=logging.INFO)
logger=logging.getLogger("docker_configurator")
__version__ = "v0.9.0"
def deep_merge(*dicts):
"""
Non-destructive deep-merge of multiple dictionary-like objects
>>> a = { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1', 'recipe':['one','two'] } } }
>>> b = { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5', 'recipe':['three'] } } }
>>> c = deep_merge(a, b)
>>> a == { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1', 'recipe':['one','two'] } } }
True
>>> b == { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5', 'recipe':['three'] } } }
True
>>> c == { 'first' : { 'all_rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5', 'recipe':['three'] } } }
True
>>> c == deep_merge(a, b, c)
True
"""
# Wrap the merge function so that it is no longer destructive of its destination:
final = {}
for d in dicts:
merge(d, final)
return final
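def main():
    # Sketch only: the original main() is not included in this extract. Based on
    # the module docstring, it renders Mako templates using values from a YAML
    # file in a mounted volume. All paths and flag names here are assumptions.
    parser = argparse.ArgumentParser(description="docker-configurator")
    parser.add_argument("--config", default="/docker_configurator/config.yaml")
    parser.add_argument("--templates", default="/docker_configurator/templates")
    parser.add_argument("--output", default="/")
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f) or {}
    lookup = TemplateLookup(directories=[args.templates])
    for root, _dirs, files in os.walk(args.templates):
        for name in files:
            src = os.path.join(root, name)
            rel = os.path.relpath(src, args.templates)
            try:
                rendered = Template(filename=src, lookup=lookup).render(**config)
            except Exception:
                logger.error(mako_exceptions.text_error_template().render())
                raise
            dest = os.path.join(args.output, rel)
            dest_dir = os.path.dirname(dest)
            if dest_dir and not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            with open(dest, "w") as out:
                out.write(rendered)
            logger.info("wrote %s", dest)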
if __name__ == "__main__":
main()
| 39.24 | 114 | 0.663026 |
39b39323fb50875fc0c540df3d833adc6f094d24
| 2,583 |
py
|
Python
|
definition.example.py
|
JoshData/represent-boundaries
|
0a77bad99758bc77140c6c6def4f8d5e68810367
|
[
"MIT"
] | 2 |
2016-07-05T06:10:21.000Z
|
2016-10-20T17:55:13.000Z
|
definition.example.py
|
JoshData/represent-boundaries
|
0a77bad99758bc77140c6c6def4f8d5e68810367
|
[
"MIT"
] | null | null | null |
definition.example.py
|
JoshData/represent-boundaries
|
0a77bad99758bc77140c6c6def4f8d5e68810367
|
[
"MIT"
] | 2 |
2016-07-05T06:10:25.000Z
|
2020-03-04T02:22:24.000Z
|
from datetime import date
import boundaries
boundaries.register('federal-electoral-districts', # The slug of the boundary set
# The name of the boundary set for display.
name='Federal electoral districts',
# Generic singular name for a boundary from this set. Optional if the
# boundary set's name ends in "s".
singular='Federal electoral district', # If this were omitted, the same value would be generated
# Geographic extents which the boundary set encompasses
domain='Canada',
# Path to the shapefile directory. Relative to the current file, so if this file
# is in the same directory as the shapefile -- usually the case -- you can omit
# this parameter.
file='',
# Last time the source was updated or checked for new data
last_updated=date(1970, 1, 1),
# A function that's passed the feature and should return a name string
# The boundaries model provides some simple function factories for this.
name_func=boundaries.clean_attr('FEDENAME'),
# Function to extract a feature's "external_id" property
id_func=boundaries.attr('FEDUID'),
# Function to provide the slug (URL component) of the boundary
# If not provided, uses the name to generate the slug; this is usually
# what you want.
#slug_func=boundaries.attr('FEDUID'),
# Function that returns true/false to determine whether a given feature should be included
# By default, all features are included.
#is_valid_func=lambda f: True,
# Authority that is responsible for the accuracy of this data
authority='H.R.M. Queen Elizabeth II',
# A URL to the source of this data
source_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/bound-limit-eng.cfm',
# A URL to the license for this data
licence_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/license-eng.cfm?lang=_e&year=11&type=fed000a&format=a',
# A URL to the data file, e.g. a ZIP archive
data_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/files-fichiers/gfed000a11a_e.zip',
    # Notes identifying any peculiarities about the data, such as columns that
# were deleted or files which were merged
notes='',
# Encoding of the text fields in the shapefile, e.g. 'utf-8'. Default: 'ascii'
encoding='iso-8859-1',
# Used only by the represent-maps app -- if you're not using that, ignore label_point_func.
# A function from a feature object to a Point where to display a label for feature on a map.
#label_point_func = lambda feature: None,
)
| 52.714286 | 139 | 0.722416 |
39b549fc5da98ce81d958623dcf67a57d0a50eec
| 2,962 |
py
|
Python
|
tyo_mq_client/publisher.py
|
e-tang/tyo-mq-client-python
|
82ea47bf8cf8a924b515149456eaecb5557a0f3e
|
[
"MIT"
] | null | null | null |
tyo_mq_client/publisher.py
|
e-tang/tyo-mq-client-python
|
82ea47bf8cf8a924b515149456eaecb5557a0f3e
|
[
"MIT"
] | 1 |
2018-06-19T23:42:27.000Z
|
2018-06-20T07:06:25.000Z
|
tyo_mq_client/publisher.py
|
e-tang/tyo-mq-client-python
|
82ea47bf8cf8a924b515149456eaecb5557a0f3e
|
[
"MIT"
] | null | null | null |
#
#
from .subscriber import Subscriber
from .logger import Logger
from .constants import Constants
from .events import Events
#
import json
| 33.280899 | 105 | 0.641458 |
39b57868be76cc021f5f1127464558d697a138df
| 3,560 |
py
|
Python
|
app/authenticate.py
|
directedbyshawn/Secure-Login
|
15f2a6168986b11ffbde318333415671fb62578f
|
[
"MIT"
] | null | null | null |
app/authenticate.py
|
directedbyshawn/Secure-Login
|
15f2a6168986b11ffbde318333415671fb62578f
|
[
"MIT"
] | null | null | null |
app/authenticate.py
|
directedbyshawn/Secure-Login
|
15f2a6168986b11ffbde318333415671fb62578f
|
[
"MIT"
] | null | null | null |
'''
Authentication methods for cs166 final project.
'''
import random, hashlib
from .db import retrieve_accounts
lower_case = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
upper_case = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
nums = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
special = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '?', '[', ']', '{', '}', ':', ';', '"', '/', '.', ',', '<', '>']
def authenticate(username, password):
''' Authenticates user upon login '''
# retrieves users from database
users = retrieve_accounts()
stored_username = ''
stored_password = ''
# finds user in records
for user in users:
if user[0] == username:
stored_username = user[0]
stored_password = user[1]
# if user is not found, false
if (stored_username == '' or stored_password == ''):
return False
# retrieves salt and stored password from pw string
salt_length = 40
salt = stored_password[:salt_length]
stored_hash = stored_password[salt_length:]
# compares inputted password with hash and returns result
hashable = salt + password
hashable = hashable.encode('utf-8')
this_hash = hashlib.sha1(hashable).hexdigest()
return this_hash == stored_hash
def verify_new_account(username, password):
'''
Method used to determine if new account credentials are valid
Parameters:
username (str) : username entered by user
password (str) : password entered by user
Returns:
status (bool) : status of if the new credentials are good or not
'''
global lower_case, upper_case, nums, special
# retrieves all users from db and makes a list of all usernames
users = retrieve_accounts()
taken_usernames = []
for accounts in users:
taken_usernames.append(accounts[0])
# status of whether or not password contains the requirements
requirement_one = len(password) >= 8
requirement_two = len(password) <= 25
requirement_three = username not in taken_usernames
requirement_lower = False
    requirement_upper = False
requirement_nums = False
requirement_special = False
for char in password:
if char in lower_case:
requirement_lower = True
if char in upper_case:
            requirement_upper = True
if char in nums:
requirement_nums = True
if char in special:
requirement_special = True
# SQL injection prevention
for char in username:
if char in special:
return False
status = False
    if (requirement_one and requirement_two and requirement_three and requirement_lower and requirement_upper and requirement_nums and requirement_special):
status = True
return status
def random_password():
'''
Function to return randomly generated password
Returns:
password (str) : randomly generated password
'''
global lower_case, upper_case, nums, special
chars = [lower_case, upper_case, nums, special]
password_length = random.randint(12, 16)
password = ''
for i in range(password_length):
lib = chars[random.randint(0, 3)]
char = lib[random.randint(0, len(lib) - 1)]
password += char
return password
| 28.709677 | 156 | 0.589045 |
39b6bd6353821651a0a01cf687e78a807a34d494
| 337 |
py
|
Python
|
tests/base_test_case.py
|
caoziyao/orm
|
24121b8b10910c121a5dff19c6fd9f25ec7f425c
|
[
"MIT"
] | 1 |
2016-10-30T14:41:39.000Z
|
2016-10-30T14:41:39.000Z
|
tests/base_test_case.py
|
caoziyao/orm
|
24121b8b10910c121a5dff19c6fd9f25ec7f425c
|
[
"MIT"
] | null | null | null |
tests/base_test_case.py
|
caoziyao/orm
|
24121b8b10910c121a5dff19c6fd9f25ec7f425c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
@author: csy
@license: (C) Copyright 2017-2018
@contact: [email protected]
@time: 2018/11/22
@desc:
"""
import unittest
from orm.data_base import Database
| 18.722222 | 65 | 0.688427 |
39b8f43a4fc39e9ee986451845affe8860e4df82
| 381 |
py
|
Python
|
setup.py
|
kervi/kervi-hal-win
|
adb0d93f63b3ed36fd6527c69dc301a63a30138f
|
[
"MIT"
] | null | null | null |
setup.py
|
kervi/kervi-hal-win
|
adb0d93f63b3ed36fd6527c69dc301a63a30138f
|
[
"MIT"
] | null | null | null |
setup.py
|
kervi/kervi-hal-win
|
adb0d93f63b3ed36fd6527c69dc301a63a30138f
|
[
"MIT"
] | null | null | null |
import distutils.dir_util
from setuptools import setup
try:
from kervi.platforms.windows.version import VERSION
except:
VERSION = "0.0"
try:
distutils.dir_util.remove_tree("dist")
except:
pass
setup(
name='kervi-hal-win',
version=VERSION,
packages=[
"kervi/platforms/windows",
],
install_requires=[
'psutil',
'inputs'
],
)
| 15.24 | 55 | 0.627297 |
39b9562e1c7649e5f232cd655226d45528bdfb68
| 877 |
py
|
Python
|
examples/minimize_koopman_error.py
|
kijanac/Materia
|
b49af518c8eff7d3a8c6caff39783e3daf80a7a0
|
[
"MIT"
] | null | null | null |
examples/minimize_koopman_error.py
|
kijanac/Materia
|
b49af518c8eff7d3a8c6caff39783e3daf80a7a0
|
[
"MIT"
] | null | null | null |
examples/minimize_koopman_error.py
|
kijanac/Materia
|
b49af518c8eff7d3a8c6caff39783e3daf80a7a0
|
[
"MIT"
] | null | null | null |
import argparse
import materia as mtr
import dask.distributed
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--qcenv", type=str)
parser.add_argument("--scratch", type=str)
parser.add_argument("--dask_scratch", type=str)
parser.add_argument("--num_evals", type=int)
args = parser.parse_args()
m = mtr.Molecule("benzene")
qchem = mtr.QChem(qcenv=args.qcenv, scratch_dir=args.scratch)
io = mtr.IO("gs.in", "gs.out", "minimize_koopman_error")
min_ke = qchem.minimize_koopman_error(io, name="min_ke")
min_ke.requires(molecule=m, num_evals=args.num_evals)
wf = mtr.Workflow(min_ke)
cluster = dask.distributed.LocalCluster()
with dask.config.set(temporary_directory=args.dask_scratch):
with dask.distributed.Client(cluster) as client:
print(wf.compute()["min_ke"])
| 31.321429 | 65 | 0.698974 |
39ba8a8ab31258dd5face8cc99e1f8cec294b091
| 300 |
py
|
Python
|
simple/__init__.py
|
jbrid867/SIMPLE
|
56e88c8271c22f7c41bd5d6b148b01e11a9e3713
|
[
"Apache-2.0"
] | 1 |
2019-01-19T06:44:29.000Z
|
2019-01-19T06:44:29.000Z
|
simple/__init__.py
|
jbrid867/SIMPLE
|
56e88c8271c22f7c41bd5d6b148b01e11a9e3713
|
[
"Apache-2.0"
] | 179 |
2018-10-02T21:07:19.000Z
|
2020-09-08T17:38:44.000Z
|
simple/__init__.py
|
johnbridstrup/simple
|
56e88c8271c22f7c41bd5d6b148b01e11a9e3713
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for simple."""
__author__ = """John Bridstrup"""
__email__ = '[email protected]'
__version__ = '0.1.8'
# import Data
# import data_analysis
# import kernels
# import KMC
# import running
# import simple
# import simulations
# import statevector
| 17.647059 | 38 | 0.703333 |
39baf90e3f5d1892dbfa7337958aae37f41a76bf
| 13,482 |
py
|
Python
|
emarket/views.py
|
MerlinEmris/eBazar
|
f159314183a8a95afd97d36b0d3d8cf22015a512
|
[
"MIT"
] | null | null | null |
emarket/views.py
|
MerlinEmris/eBazar
|
f159314183a8a95afd97d36b0d3d8cf22015a512
|
[
"MIT"
] | null | null | null |
emarket/views.py
|
MerlinEmris/eBazar
|
f159314183a8a95afd97d36b0d3d8cf22015a512
|
[
"MIT"
] | null | null | null |
# from traceback import TracebackException
from django.contrib.auth.forms import UserCreationForm
# from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.postgres.search import SearchVector
from django.core import serializers
from django.http import JsonResponse
from django.views import View
# import os
# from django.contrib.sites.shortcuts import get_current_site
# from django.utils.encoding import force_bytes
# from django.utils.encoding import force_text
# from django.utils.http import urlsafe_base64_encode
# from django.utils.http import urlsafe_base64_decode
# from django.template.loader import render_to_string
from django.http import HttpResponse
import django_filters.rest_framework
from django.shortcuts import render, redirect
from .forms import ProfilePhotoForm, PhotoForm, SignUpForm, ProfileForm, ItemForm, SearchForm
from .models import User, Profile, Item, Category, Item_Image, Favorite_item
from ebazar import settings
from .serializers import ( CategorySerializer,
ItemSerializer,
UserSerializer,
Item_ImageSerializer,)
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets, status
# import django_filters.rest_framework
from rest_framework.generics import (
DestroyAPIView,
ListAPIView,
UpdateAPIView,
RetrieveAPIView,
CreateAPIView
)
from rest_framework.views import APIView
import shutil
import os
import datetime
import json
# print console logs
log_prefix = '['+datetime.datetime.now().strftime("%d-%m-%y %H:%M:%S")+']'
log_end = '********'
log_date = datetime.datetime.now().strftime("%d-%m-%y_%H:%M")
# redirect to create user (url(r'^$'))
# create user with min information
def show_item(request, item_id):
user = request.user
exist = 1
# if user and request.method == "GET":
# favs = Favorite_item.objects.filter(user=user)
#
# for fav in favs:
# if fav.item_id == int(item_id):
# print(fav.item_id)
# exist = 1
# else:
# exist = 0
item = Item.objects.filter(id=item_id)[0]
item_images = Item_Image.objects.filter()
return render(request, 'emarket/item_detail.html', {'item': item,
'pics': item_images,
'exist': exist})
# @login_required
# def add_to_fav(request):
# return redirect('home')
def show_category(request, cat_id):
cat = Category.objects.get(id=cat_id)
items = Item.objects.filter(category=cat)
pics = Item_Image.objects.all()
if items:
paginator = Paginator(items, 9)
page = request.GET.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
items = paginator.page(1)
except EmptyPage:
items = paginator.page(paginator.num_pages)
return render(request, 'emarket/show_category.html', {'cat':cat, 'items':items, 'pics':pics})
def home(request):
cats = Category.objects.all()
# item_pic = {}
items = Item.objects.order_by('-price')[0:9]
item_images = Item_Image.objects.filter()
# print(item_images)
# print(items)
# print(categories)
return render(request, 'emarket/home.html', {'cats': cats, 'items': items, 'pics': item_images, })
def search(request, search_word=None):
    message = 'All items:'
pics = Item_Image.objects.all()
items = Item.objects.all()
form = SearchForm
if request.method == 'POST':
form = SearchForm(request.POST)
search_word = request.POST.get('search')
location = request.POST.get('location')
user = request.POST.get('user')
if location and user:
items = Item.objects.filter(name__icontains=search_word).filter(user=user).filter(location=location)
elif user:
items = Item.objects.filter(name__icontains=search_word).filter(user=user)
elif location:
items = Item.objects.filter(name__icontains=search_word).filter(location=location)
else:
items = Item.objects.filter(name__icontains=search_word)
if items:
            message = 'Results:'
        else:
            message = 'Nothing found'
items = None
if items:
paginator = Paginator(items, 18)
page = request.GET.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
items = paginator.page(1)
except EmptyPage:
items = paginator.page(paginator.num_pages)
return render(request, 'emarket/expo.html', {'items': items, 'pics': pics, 'ms': message, 's_word': search_word, 'form':form})
class UserCreate(APIView):
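    # The view body is not included in this extract; a minimal DRF sketch of a
    # user-creation endpoint using the imported UserSerializer:
    def post(self, request):
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)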
# api for item
# api for category
| 34.480818 | 130 | 0.641893 |
39bdb6e5ac777c1dbb29e8d29b5d3a629b8f1d14
| 3,683 |
py
|
Python
|
cogs/misc.py
|
DoggieLicc/doggie-bot
|
31400a32916e08cd5b7909cce17db66ea927d2e3
|
[
"MIT"
] | 3 |
2021-08-30T16:51:04.000Z
|
2021-09-13T17:04:29.000Z
|
cogs/misc.py
|
DoggieLicc/doggie-bot
|
31400a32916e08cd5b7909cce17db66ea927d2e3
|
[
"MIT"
] | 1 |
2021-08-30T15:29:37.000Z
|
2021-09-09T23:59:47.000Z
|
cogs/misc.py
|
DoggieLicc/doggie-bot
|
31400a32916e08cd5b7909cce17db66ea927d2e3
|
[
"MIT"
] | null | null | null |
import discord
import utils
import inspect
from discord.ext import commands
from io import StringIO
| 29 | 103 | 0.555525 |