seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---
7784070706 |
"""MYAPP Core application logic."""
from json import (
JSONDecoder,
JSONEncoder,
loads as _json_loads,
)
from logging import getLogger
from pathlib import PosixPath
from http import HTTPStatus
from flask import Blueprint, current_app, request, Response
from flask.views import MethodView
from webargs.flaskparser import FlaskParser
from marshmallow import Schema, fields, pre_dump, RAISE, EXCLUDE
__all__ = [
'APP_PATH',
'APIMethodView',
'APIBlueprint',
'APIError',
'APIRequestSchema',
'APIResponseSchema',
'APIMetadataSchema',
'JSONEncoder',
'JSONDecoder',
'json_dump',
'json_dumps',
'json_loads',
'parse',
'log_request',
'log_response',
]
LOG = getLogger(__name__)
# ----------------------------------CONSTANTS----------------------------------
APP_PATH = PosixPath(__file__).parent
# ----------------------------------CONSTANTS----------------------------------
# -------------------------------WEBARGS SETTINGS-------------------------------
class APIRequestParser(FlaskParser):
def handle_error(self, error, req, schema, *, error_status_code, error_headers):
raise APIError(
'The request specification is invalid; check OpenAPI docs for more info.',
metadata={'errors': error.messages},
http_status=error_status_code or HTTPStatus.OK,
)
def parse_files(self, req, name, field):
raise NotImplementedError
parser = APIRequestParser()
parse = parser.use_args
# -------------------------------WEBARGS SETTINGS-------------------------------
# --------------------------------SERIALIZATION--------------------------------
class APIRequestSchema(Schema):
"""MYAPP base request schema."""
class Meta:
"""Raise on unknown parameters."""
unknown = RAISE
class APICommonRequestSchema(Schema):
"""MYAPP common request parameters."""
class Meta:
"""Do not react on unknown parameters."""
unknown = EXCLUDE
debug_tb_enabled = fields.Boolean(
required=False,
default=False,
)
class APIResponseSchema(Schema):
"""MYAPP base response schema."""
class Meta:
"""Exclude any unknown parameters."""
unknown = EXCLUDE
data = fields.Dict(
required=True,
default=dict,
)
metadata = fields.Nested(
'APIMetadataSchema',
required=True,
)
@classmethod
def default_metadata(cls):
"""
Create default metadata.
:return: metadata fallback
"""
return {
'status': 0,
'message': 'Nice',
'headers': {},
'errors': None,
'details': None,
}
@pre_dump
def pre_dump(self, response, many=None):
"""
Make pre dump handling.
:param response: raw response
:param many: is many
:return: enriched raw response
"""
_ = many
metadata = self.default_metadata()
response_metadata = response.get('metadata', {})
for field in 'status', 'message', 'headers', 'errors', 'details':
if field in response_metadata:
metadata[field] = response_metadata[field]
# FIXME: dynamic messages
if metadata['status'] and metadata['message'] == 'Nice':
metadata['message'] = 'Not nice'
response['metadata'] = metadata
return response
class APIMetadataSchema(Schema):
"""MYAPP Metadata schema."""
status = fields.Integer(
required=True,
default=0,
)
message = fields.String(
required=True,
default='Nice',
)
headers = fields.Dict(
required=True,
default=dict,
)
errors = fields.Dict(
required=True,
allow_none=True,
default=None,
)
details = fields.Dict(
required=True,
allow_none=True,
default=None,
)
# --------------------------------SERIALIZATION--------------------------------
# ------------------------FLASK AND APPLICATION GENERICS------------------------
class APIJSONEncoder(JSONEncoder):
"""MYAPP JSON Encoder."""
def __init__(
self,
*,
skipkeys=False,
check_circular=True,
allow_nan=True,
separators=None,
default=None,
):
"""
Initialize encoder.
:param skipkeys: is skip
:param check_circular: is check circular
:param allow_nan: is allow nan
:param separators: separator char
:param default: default value
"""
ensure_ascii = current_app.config['JSON_ENSURE_ASCII']
sort_keys = current_app.config['JSON_SORT_KEYS']
indent = current_app.config['JSON_INDENT']
super().__init__(
skipkeys=skipkeys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
sort_keys=sort_keys,
indent=indent,
separators=separators,
default=default,
)
class APIJSONDecoder(JSONDecoder):
"""MYAPP JSON Decoder."""
def json_dumps(obj, **kwargs):
"""
MYAPP json dumps.
:param obj: object
:param kwargs: any
:return: json string
"""
return APIJSONEncoder(**kwargs).encode(obj)
def json_dump(obj, file, **kwargs):
"""
MYAPP json dump.
:param obj: python object
    :param file: writable file-like object
:param kwargs: any
"""
for chunk in APIJSONEncoder(**kwargs).iterencode(obj):
file.write(chunk)
def json_loads(string, **kwargs):
"""
MYAPP json loads.
:param string: json string
:param kwargs: any
:return: dict
"""
return _json_loads(string, cls=APIJSONDecoder, **kwargs)
class APIMethodView(MethodView):
"""API Method View."""
decorators = (
parse(APICommonRequestSchema(), location='query'),
)
class APIBlueprint(Blueprint):
"""API Blueprint."""
def log_request():
"""Log request in curl-based fashion."""
msg = fr"curl -w '\n' -iX {request.method} '{request.url}' "
msg += ''.join(f"-H '{h}:{v}' " for h, v in request.headers.items())
if (
request.method in {'POST', 'PUT', 'PATCH'}
and request.headers.get('Content-Type') == 'application/json'
):
msg += f"-d '{request.data.decode('utf8')}'"
LOG.info(msg)
def log_response(response: Response):
"""
Log response json.
:param response: flask response
:return: flask response
"""
if response.is_json:
LOG.info(f'Response: {response.json}')
return response
# ------------------------FLASK AND APPLICATION GENERICS------------------------
# ---------------------------EXCEPTIONS AND MESSAGES---------------------------
class APIError(Exception):
"""Base API Exception."""
def __init__(self, *args, **kwargs):
"""
Initialize API exception.
:param args: any
:param kwargs: any
"""
schema = kwargs.pop('schema', APIResponseSchema())
data = kwargs.pop('data', {})
metadata = kwargs.pop('metadata', {})
metadata.setdefault('message', 'Error' if not args else args[0])
metadata.setdefault('status', 3)
self.json = schema.dump({'data': data, 'metadata': metadata})
self.http_status = kwargs.pop('http_status', HTTPStatus.OK)
super().__init__(*args)
# ---------------------------EXCEPTIONS AND MESSAGES---------------------------
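# ---------------------------HYPOTHETICAL USAGE SKETCH---------------------------
# Not part of the original module: a minimal sketch of how the generics above are
# typically wired together. The view, the '/ping' route and the blueprint name are
# assumptions, and the application is assumed to register an error handler that
# turns APIError.json / APIError.http_status into a Flask response.
class PingView(APIMethodView):
    """Answer GET requests with a canned payload."""
    def get(self, *args):
        # Any positional args come from the class-level query-string parser.
        if request.args.get('fail'):
            raise APIError('Synthetic failure for demonstration purposes.')
        payload = APIResponseSchema().dump({'data': {'ping': 'pong'}})
        return Response(json_dumps(payload), content_type='application/json')
ping_blueprint = APIBlueprint('ping', __name__)
ping_blueprint.add_url_rule('/ping', view_func=PingView.as_view('ping'))
# ---------------------------HYPOTHETICAL USAGE SKETCH---------------------------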
| jjj4x/flask_api_example | src/myapp/core.py | core.py | py | 7,547 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "logging.getLogger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pathlib.PosixPath",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "webargs.flaskparser.FlaskParser",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "http.HTTPStatus.OK",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "http.HTTPStatus",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "marshmallow.Schema",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "marshmallow.RAISE",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "marshmallow.Schema",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "marshmallow.EXCLUDE",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Boolean",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "marshmallow.Schema",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "marshmallow.EXCLUDE",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Dict",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Nested",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "marshmallow.pre_dump",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "marshmallow.Schema",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Integer",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.String",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Dict",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Dict",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Dict",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "json.JSONEncoder",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "json.JSONDecoder",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "flask.Blueprint",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "flask.request.url",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "flask.request.headers.items",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "flask.request.data.decode",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "flask.Response",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "http.HTTPStatus.OK",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "http.HTTPStatus",
"line_number": 299,
"usage_type": "name"
}
] |
277770918 |
import os, sys
import subprocess
# os.environ['DISPLAY'] = ':99.0'
# os.environ['PYVISTA_OFF_SCREEN'] = 'true'
# os.environ['PYVISTA_USE_IPYVTK'] = 'true'
# bashCommand ="Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 & sleep 3"
# process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
# process.wait()
sys.path.insert(0, os.path.abspath("../../../.."))
from copy import deepcopy
import numpy as np
import torch
import pyvista as pv
import matplotlib.pyplot as plt
from shapmagn.global_variable import Shape, shape_type
from shapmagn.datasets.data_utils import read_json_into_list, get_obj, get_file_name
from shapmagn.shape.shape_pair_utils import create_shape_pair
from shapmagn.utils.obj_factory import obj_factory
from shapmagn.utils.visualizer import (
    visualize_point_fea,
    # the following two are used by lung_isolated_leaf_clean_up() below
    visualize_point_fea_with_arrow,
    visualize_point_overlap,
    visualize_point_pair,
    visualize_multi_point,
)
from shapmagn.utils.local_feature_extractor import *
def get_pair(source_path, target_path, expand_bch_dim=True, return_tensor=True):
get_obj_func = get_obj(
reader_obj,
normalizer_obj,
sampler_obj,
device,
expand_bch_dim=expand_bch_dim,
return_tensor=return_tensor,
)
source_obj, source_interval = get_obj_func(source_path)
target_obj, target_interval = get_obj_func(target_path)
return source_obj, target_obj
def plot_pair_weight_distribution(
source_weight, target_weight, use_log=False, title="", show=True, save_path=None
):
plt.style.use("bmh")
fig, ax = plt.subplots()
source_weight = np.log(source_weight) if use_log else source_weight
target_weight = np.log(target_weight) if use_log else target_weight
ax.hist(source_weight, bins=1000, density=0, histtype="stepfilled", alpha=0.7)
ax.hist(target_weight, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
title += "weight" if not use_log else "log_weight"
ax.set_title(title)
if show:
plt.show()
if save_path:
plt.savefig(save_path, dpi=300)
plt.clf()
def plot_pair_weight_distribution_before_and_after_radius_matching(
source_weight1,
target_weight1,
source_weight2,
target_weight2,
use_log=False,
title="",
show=True,
save_path=None,
):
plt.style.use("bmh")
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
source_weight_matched1 = matching_np_radius(source_weight1, target_weight1)
smw_sum1, sw_sum1, tp_sum1 = (
source_weight_matched1.sum(),
source_weight1.sum(),
target_weight1.sum(),
)
source_weight1 = np.log(source_weight1) if use_log else source_weight1
target_weight1 = np.log(target_weight1) if use_log else target_weight1
ax0.hist(source_weight1, bins=1000, density=0, histtype="stepfilled", alpha=0.7)
ax0.hist(target_weight1, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax0.set_title("sw_sum: {:.3f}, tp_sum:{:.3f}".format(sw_sum1, tp_sum1), fontsize=10)
source_weight_matched1_norm = (
np.log(source_weight_matched1) if use_log else source_weight_matched1
)
ax1.hist(
source_weight_matched1_norm,
bins=1000,
density=0,
histtype="stepfilled",
alpha=0.7,
)
ax1.hist(target_weight1, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax1.set_title(
"smw_sum: {:.3f}, tp_sum:{:.3f}".format(smw_sum1, tp_sum1), fontsize=10
)
source_weight_matched2 = matching_np_radius(source_weight2, target_weight2)
smw_sum2, sw_sum2, tp_sum2 = (
source_weight_matched2.sum(),
source_weight2.sum(),
target_weight2.sum(),
)
source_weight2 = np.log(source_weight2) if use_log else source_weight2
target_weight2 = np.log(target_weight2) if use_log else target_weight2
ax2.hist(source_weight2, bins=1000, density=0, histtype="stepfilled", alpha=0.7)
ax2.hist(target_weight2, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax2.set_title("sw_sum: {:.3f}, tp_sum:{:.3f}".format(sw_sum2, tp_sum2), fontsize=10)
source_weight_matched2_norm = (
np.log(source_weight_matched2) if use_log else source_weight_matched2
)
ax3.hist(
source_weight_matched2_norm,
bins=1000,
density=0,
histtype="stepfilled",
alpha=0.7,
)
ax3.hist(target_weight2, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax3.set_title(
"smw_sum: {:.3f}, tp_sum:{:.3f}".format(smw_sum2, tp_sum2), fontsize=10
)
fig.subplots_adjust(hspace=0.3)
fig.suptitle(title)
if show:
plt.show()
if save_path:
plt.savefig(save_path, dpi=300)
plt.clf()
return source_weight_matched1, source_weight_matched2
def get_half_lung(lung, normalize_weight=False):
weights = lung.weights.detach()
points = lung.points.detach()
pos_filter = points[..., 0] < 0
points = points[pos_filter][None]
weights = weights[pos_filter][None]
weights = weights / weights.sum() if normalize_weight else weights
half_lung = Shape()
half_lung.set_data(points=points, weights=weights)
return half_lung
def get_key_vessel(lung, thre=2e-05):
weights = lung.weights.detach()
points = lung.points.detach()
mask = (lung.weights > thre)[..., 0]
weights = weights[mask][None]
points = points[mask][None]
key_lung = Shape()
key_lung.set_data(points=points, weights=weights)
return key_lung
def sampled_via_radius(source, target):
min_npoints = min(source.npoints, target.npoints)
tw = target.weights.squeeze()
sw = source.weights.squeeze()
t_sorted, t_indices = torch.sort(tw, descending=True)
s_sorted, s_indices = torch.sort(sw, descending=True)
t_sampled_indices = t_indices[:min_npoints]
s_sampled_indices = s_indices[:min_npoints]
tp_sampled = target.points[:, t_sampled_indices]
sp_sampled = source.points[:, s_sampled_indices]
tw_sampled = target.weights[:, t_sampled_indices]
sw_sampled = source.weights[:, s_sampled_indices]
target_sampled, source_sampled = Shape(), Shape()
target_sampled.set_data(points=tp_sampled, weights=tw_sampled)
source_sampled.set_data(points=sp_sampled, weights=sw_sampled)
return source_sampled, target_sampled
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image.
Code adapted from
http://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(
source, return_inverse=True, return_counts=True
)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
def matching_np_radius(source_weights, target_weights):
"""
:param source_weights: Nx1
:param target_weights: Mx1
    :return: matched source weights, Nx1
"""
ns = source_weights.shape[0]
sw = source_weights.squeeze()
tw = target_weights.squeeze()
range = [min(sw.min(), tw.min()), max(sw.max(), tw.max())]
resol = 10000
interp = (range[1] - range[0]) / resol
bins = np.linspace(range[0] - 2 * interp, range[1] + 2 * interp, resol)
sw_indice = np.digitize(sw, bins, right=False)
tw_indice = np.digitize(tw, bins, right=False)
sw_digitize = bins[sw_indice]
tw_digitize = bins[tw_indice]
sw_transformed = hist_match(sw_digitize, tw_digitize)
return sw_transformed.reshape(ns, 1).astype(np.float32)
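# Hypothetical usage sketch (not part of the original script): run the histogram
# matching defined above on synthetic Nx1 / Mx1 weight arrays and report the total
# mass before and after. The gamma-distributed weights are illustrative only.
def _demo_matching_np_radius():
    rng = np.random.default_rng(seed=0)
    source_w = rng.gamma(shape=2.0, scale=1e-05, size=(2000, 1)).astype(np.float32)
    target_w = rng.gamma(shape=4.0, scale=1e-05, size=(3000, 1)).astype(np.float32)
    matched_w = matching_np_radius(source_w, target_w)
    print(
        "matched shape: {}, source sum: {:.5f}, matched sum: {:.5f}, target sum: {:.5f}".format(
            matched_w.shape, source_w.sum(), matched_w.sum(), target_w.sum()
        )
    )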
def matching_shape_radius(source, target, sampled_by_radius=False, show=True):
if sampled_by_radius:
source, target = sampled_via_radius(source, target)
device = source.points.device
sn = source.npoints
tn = target.npoints
sw = source.weights.squeeze().cpu().numpy()
tw = target.weights.squeeze().cpu().numpy()
range = [min(sw.min(), tw.min()), max(sw.max(), tw.max())]
resol = 10000
interp = (range[1] - range[0]) / resol
bins = np.linspace(range[0] - 2 * interp, range[1] + 2 * interp, resol)
sw_indice = np.digitize(sw, bins, right=False)
tw_indice = np.digitize(tw, bins, right=False)
sw_digitize = bins[sw_indice]
tw_digitize = bins[tw_indice]
sw_transformed = hist_match(sw_digitize, tw_digitize)
if show:
plot_pair_weight_distribution(sw_digitize, tw_digitize, use_log=True)
plot_pair_weight_distribution(sw_transformed, tw_digitize, use_log=True)
visualize_point_pair(
source.points,
target.points,
source.weights,
target.weights,
title1="source(before)",
title2="target(before)",
)
visualize_point_pair(
source.points,
target.points,
sw_transformed,
tw_digitize,
title1="source(after)",
title2="target(after)",
)
source.weights = (
torch.tensor(sw_transformed.astype(np.float32)).to(device).view(1, sn, 1)
)
target.weights = (
torch.tensor(tw_digitize.astype(np.float32)).to(device).view(1, tn, 1)
)
return source, target
def source_weight_transform(weights, compute_on_half_lung=False):
weights = weights * 1
weights_cp = deepcopy(weights)
thre = 1.9e-05
thre = thre # if not compute_on_half_lung else thre*2
weights[weights_cp < thre] = 1e-7
return weights
def flowed_weight_transform(weights, compute_on_half_lung=False):
weights = weights * 1
weights_cp = deepcopy(weights)
thre = 1.9e-05
thre = thre # if not compute_on_half_lung else thre * 2
weights[weights_cp < thre] = 1e-7
return weights
def target_weight_transform(weights, compute_on_half_lung=False):
weights = weights * 1
weights_cp = deepcopy(weights)
thre = 1.9e-05
thre = thre # if not compute_on_half_lung else thre * 2
weights[weights_cp < thre] = 1e-7
# weights[weights_cp > 1.1e-05] = 1e-7
return weights
def pair_shape_transformer(init_thres=2.9e-5, nstep=5):
# todo the next step of the transformer is to return a smoothed mask to constrain the movement of the lung
def transform(source, target, cur_step):
min_weights = min(torch.min(source.weights), torch.min(target.weights))
max_weights = min(torch.max(source.weights), torch.max(target.weights))
max_weights = max_weights.item()
cur_step = cur_step.item()
assert init_thres > min_weights
thres = init_thres - (init_thres - min_weights) / nstep * cur_step
s_weights = source.weights.clone()
t_weights = target.weights.clone()
s_weights[source.weights < thres] = 1e-7
t_weights[target.weights < thres] = 1e-7
s_transformed, t_transformed = Shape(), Shape()
s_transformed.set_data(
points=source.points, weights=s_weights, pointfea=source.pointfea
)
t_transformed.set_data(
points=target.points, weights=t_weights, pointfea=target.pointfea
)
print("the weight of the lung pair are updated")
return s_transformed, t_transformed
return transform
def capture_plotter(save_source=False):
from shapmagn.utils.visualizer import visualize_point_pair_overlap
inner_count = 0
def save(record_path, name_suffix, shape_pair):
nonlocal inner_count
source, flowed, target = shape_pair.source, shape_pair.flowed, shape_pair.target
for sp, fp, tp, sw, fw, tw, pair_name in zip(
source.points,
flowed.points,
target.points,
source.weights,
flowed.weights,
target.weights,
pair_name_list,
):
if inner_count == 0 or save_source:
path = os.path.join(
record_path, "source_target" + "_" + name_suffix + ".png"
)
visualize_point_pair_overlap(
sp,
tp,
flowed_weight_transform(fw, True),
target_weight_transform(tw, True),
title1="source",
title2="target",
rgb_on=False,
saving_capture_path=path,
show=False,
)
path_1 = os.path.join(
record_path,
pair_name + "_flowed_target" + "_main_" + name_suffix + ".png",
)
path_2 = os.path.join(
record_path,
pair_name + "_flowed_target" + "_whole_" + name_suffix + ".png",
)
visualize_point_pair_overlap(
fp,
tp,
flowed_weight_transform(fw, True),
target_weight_transform(tw, True),
title1="flowed",
title2="target",
rgb_on=False,
saving_capture_path=path_1,
show=False,
)
visualize_point_pair_overlap(
fp,
tp,
fw,
tw,
title1="flowed",
title2="target",
rgb_on=False,
saving_capture_path=path_2,
show=False,
)
inner_count += 1
return save
def lung_isolated_leaf_clean_up(
lung, radius=0.032, principle_weight=None, normalize_weights=True
):
points = lung.points.detach()
weights = lung.weights.detach()
mass, dev, cov = compute_local_moments(points, radius=radius)
eigenvector_main = compute_local_fea_from_moments(
"eigenvector_main", weights, mass, dev, cov
)
filter = mass[..., 0].squeeze() > 2
to_remove = ~filter
    print(
        "In the first step, number of points removed: {} (fraction: {})".format(
            torch.sum(to_remove), torch.sum(to_remove) / len(filter)
        )
    )
points_toremove = points[:, to_remove]
mass_toremove = mass[:, to_remove]
mass = mass[:, filter]
points = points[:, filter]
weights = weights[:, filter]
eigenvector_main = eigenvector_main[:, filter]
visualize_point_fea_with_arrow(points, mass, eigenvector_main * 0.01, rgb_on=False)
visualize_point_overlap(
points,
points_toremove,
mass,
mass_toremove,
title="cleaned points",
point_size=(10, 20),
rgb_on=False,
opacity=("linear", 1.0),
)
Gamma = compute_anisotropic_gamma_from_points(
points,
cov_sigma_scale=radius,
aniso_kernel_scale=radius,
principle_weight=principle_weight,
)
mass, dev, cov = compute_aniso_local_moments(points, Gamma)
eigenvector_main = compute_local_fea_from_moments(
"eigenvector_main", weights, mass, dev, cov
)
filter = mass[..., 0].squeeze() > 2.5
to_remove = ~filter
    print(
        "In the second step, number of points removed: {} (fraction: {})".format(
            torch.sum(to_remove), torch.sum(to_remove) / len(filter)
        )
    )
points_toremove = points[:, to_remove]
mass_toremove = mass[:, to_remove]
mass = mass[:, filter]
points = points[:, filter]
weights = weights[:, filter]
eigenvector_main = eigenvector_main[:, filter]
visualize_point_fea_with_arrow(points, mass, eigenvector_main * 0.01, rgb_on=False)
visualize_point_overlap(
points,
points_toremove,
mass,
mass_toremove,
title="cleaned points",
point_size=(10, 20),
rgb_on=False,
opacity=("linear", 1.0),
)
Gamma = compute_anisotropic_gamma_from_points(
points,
cov_sigma_scale=radius,
aniso_kernel_scale=radius,
principle_weight=principle_weight,
)
mass, dev, cov = compute_aniso_local_moments(points, Gamma)
eigenvector_main = compute_local_fea_from_moments(
"eigenvector_main", weights, mass, dev, cov
)
filter = mass[..., 0].squeeze() > 3
to_remove = ~filter
    print(
        "In the third step, number of points removed: {} (fraction: {})".format(
            torch.sum(to_remove), torch.sum(to_remove) / len(filter)
        )
    )
points_toremove = points[:, to_remove]
mass_toremove = mass[:, to_remove]
mass = mass[:, filter]
points = points[:, filter]
weights = weights[:, filter]
eigenvector_main = eigenvector_main[:, filter]
visualize_point_fea_with_arrow(points, mass, eigenvector_main * 0.01, rgb_on=False)
visualize_point_overlap(
points,
points_toremove,
mass,
mass_toremove,
title="cleaned points",
point_size=(10, 20),
rgb_on=False,
opacity=("linear", 1.0),
)
cleaned_lung = Shape()
cleaned_lung.points, cleaned_lung.weights = (
points,
weights / torch.sum(weights) if normalize_weights else weights,
)
return cleaned_lung
def analysis_large_vessel(
source,
target,
source_weight_transform=source_weight_transform,
target_weight_transform=target_weight_transform,
title1="source",
title2="target",
):
source_points, source_weights, = (
source.points.detach().cpu(),
source.weights.detach().cpu(),
)
target_points, target_weights, = (
target.points.detach().cpu(),
target.weights.detach().cpu(),
)
plot_pair_weight_distribution(
source_weight_transform(source_weights).squeeze().numpy(),
target_weight_transform(target_weights).squeeze().numpy(),
use_log=True,
)
visualize_point_pair(
source_points,
target_points,
source_weight_transform(source_weights),
target_weight_transform(target_weights),
title1=title1,
title2=title2,
)
def compute_atlas(weight_list):
atlas_weight = np.concatenate(weight_list)
return atlas_weight
def transfer_radius_and_save_sample(
cur_obj, atlas_distri, radius_transfered_saing_path
):
cur_obj["weights"] = matching_np_radius(cur_obj["weights"], atlas_distri)
data = pv.PolyData(cur_obj["points"])
for key, item in cur_obj.items():
if key not in ["points"]:
data.point_arrays[key] = item
data.save(radius_transfered_saing_path)
return cur_obj
if __name__ == "__main__":
assert (
shape_type == "pointcloud"
), "set shape_type = 'pointcloud' in global_variable.py"
device = torch.device("cpu") # cuda:0 cpu
reader_obj = "lung_dataloader_utils.lung_reader()"
normalizer_obj = (
"lung_dataloader_utils.lung_normalizer(weight_scale=60000,scale=[100,100,100])"
)
phase = "train"
use_local_mount = False
remote_mount_transfer = lambda x: x.replace(
"/playpen-raid1", "/home/zyshen/remote/llr11_mount"
)
path_transfer = (
(lambda x: remote_mount_transfer(x)) if use_local_mount else (lambda x: x)
)
dataset_json_path = (
"/playpen-raid1/zyshen/data/lung_expri/{}/pair_data.json".format(phase)
)
dataset_json_path = path_transfer(dataset_json_path)
sampler_obj = "lung_dataloader_utils.lung_sampler( method='voxelgrid',scale=0.0003)"
get_obj_func = get_obj(
reader_obj,
normalizer_obj,
sampler_obj,
device,
expand_bch_dim=False,
return_tensor=False,
)
altas_path = "/playpen-raid1/Data/UNC_vesselParticles/10067M_INSP_STD_MSM_COPD_wholeLungVesselParticles.vtk"
altas_path = path_transfer(altas_path)
atlas, _ = get_obj_func(altas_path)
sampler_obj = "lung_dataloader_utils.lung_sampler( method='combined',scale=0.0003,num_sample=30000,sampled_by_weight=True)"
get_obj_func = get_obj(
reader_obj,
normalizer_obj,
sampler_obj,
device,
expand_bch_dim=False,
return_tensor=False,
)
sampled_atlas, _ = get_obj_func(altas_path)
radius_transfered_saing_path = "/playpen-raid1/zyshen/data/lung_atlas/{}".format(
phase
)
radius_transfered_saing_path = path_transfer(radius_transfered_saing_path)
os.makedirs(radius_transfered_saing_path, exist_ok=True)
pair_name_list, pair_info_list = read_json_into_list(dataset_json_path)
pair_path_list = [
[pair_info["source"]["data_path"], pair_info["target"]["data_path"]]
for pair_info in pair_info_list
]
pair_id = 3
output_path = "/playpen-raid1/zyshen/data/lung_data_analysis/val"
for pair_id in range(len(pair_name_list)):
pair_path = pair_path_list[pair_id]
pair_path = [path_transfer(path) for path in pair_path]
sampler_obj = (
"lung_dataloader_utils.lung_sampler( method='voxelgrid',scale=0.0003)"
)
########################
plot_saving_path = os.path.join(radius_transfered_saing_path, "origin_plots")
os.makedirs(plot_saving_path, exist_ok=True)
source_path, target_path = pair_path_list[pair_id]
source, target = get_pair(
source_path, target_path, expand_bch_dim=False, return_tensor=False
)
saving_path = os.path.join(plot_saving_path, pair_name_list[pair_id] + ".png")
camera_pos = [
(-4.924379645467042, 2.17374925796456, 1.5003730890759344),
(0.0, 0.0, 0.0),
(0.40133888001174545, 0.31574165540339943, 0.8597873634998591),
]
visualize_point_pair(
source["points"],
target["points"],
source["weights"],
target["weights"],
title1="source",
title2="target",
saving_capture_path=saving_path,
camera_pos=camera_pos,
show=False,
)
plot_saving_path = os.path.join(radius_transfered_saing_path, "plots")
os.makedirs(plot_saving_path, exist_ok=True)
# vtk_saving_path = os.path.join(radius_transfered_saing_path,"data")
# os.makedirs(vtk_saving_path,exist_ok=True)
# saving_path = os.path.join(vtk_saving_path,get_file_name(source_path)+".vtk")
# mapped_source = transfer_radius_and_save_sample(source, atlas["weights"], saving_path)
# saving_path = os.path.join(vtk_saving_path,get_file_name(target_path)+".vtk")
# mapped_target = transfer_radius_and_save_sample(target, atlas["weights"], saving_path)
# plot_saving_path = os.path.join(radius_transfered_saing_path, "plots")
# source_vg_weight, target_vg_weight = source["weights"], target["weights"]
# sampler_obj ="lung_dataloader_utils.lung_sampler( method='combined',scale=0.0003,num_sample=30000,sampled_by_weight=True)"
# source, target = get_pair(source_path, target_path, expand_bch_dim=False, return_tensor=False)
# source_combined_weight, target_combined_weight = source["weights"], target["weights"]
# os.makedirs(plot_saving_path,exist_ok=True)
# saving_file_path = os.path.join(plot_saving_path,pair_info_list[pair_id]["source"]["name"]+"_weights_distribution.png")
# title = pair_info_list[pair_id]["source"]["name"] + "_" +"n_sp:{} ".format(len(source_vg_weight))+"n_tp:{}".format(len(atlas["weights"]))
# _,source_combined_mapped_weight =plot_pair_weight_distribution_before_and_after_radius_matching(source_vg_weight, atlas["weights"],source_combined_weight,sampled_atlas["weights"], use_log=True,title=title,show=False,save_path=saving_file_path)
# saving_file_path = os.path.join(plot_saving_path, pair_info_list[pair_id]["target"]["name"] + "_weights_distribution.png")
# title = pair_info_list[pair_id]["target"]["name"] + "_" + "n_sp:{} ".format(len(target_vg_weight)) + "n_tp:{}".format(len(atlas["weights"]))
# _,target_combined_mapped_weight =plot_pair_weight_distribution_before_and_after_radius_matching(target_vg_weight, atlas["weights"], target_combined_weight, sampled_atlas["weights"],use_log=True, title=title, show=False,save_path=saving_file_path)
# saving_path = os.path.join(plot_saving_path, pair_name_list[pair_id]+"_mapped.png")
# camera_pos = [(-4.924379645467042, 2.17374925796456, 1.5003730890759344), (0.0, 0.0, 0.0),
# (0.40133888001174545, 0.31574165540339943, 0.8597873634998591)]
# visualize_point_pair(source["points"], target["points"],
# source_combined_mapped_weight,
# target_combined_mapped_weight,
# title1="source", title2="target", rgb_on=False,saving_capture_path=saving_path,camera_pos=camera_pos,show=False )
# source, target = get_pair(*pair_path)
# source_vg_weight, target_vg_weight = source["weights"], target["weights"]
# title = pair_name_list[pair_id] + "_" +"n_sp:{} ".format(len(source_vg_weight))+"n_tp:{}".format(len(target_vg_weight))
# sampler_obj ="lung_dataloader_utils.lung_sampler( method='combined',scale=0.0003,num_sample=30000,sampled_by_weight=True)"
# source, target = get_pair(source_path, target_path, expand_bch_dim=False, return_tensor=False)
# source_combined_weight, target_combined_weight = source["weights"], target["weights"]
# plot_saving_path = os.path.join(radius_transfered_saing_path,"plots")
# saving_folder_path = os.path.join(output_path,pair_name_list[pair_id])
# os.makedirs(saving_folder_path,exist_ok=True)
# saving_file_path = os.path.join(saving_folder_path,pair_name_list[pair_id]+"_weights_distribution.png")
# plot_pair_weight_distribution_before_and_after_radius_matching(source_vg_weight, target_vg_weight,source_combined_weight,target_combined_weight, use_log=True,title=title,show=False,save_path=saving_file_path)
#
# visualize_point_pair(source["points"], target["points"],
# source["weights"],
# target["weights"],
# title1="source", title2="target", rgb_on=False)
#
#
# shape_pair = create_shape_pair(source, target)
# source_half = get_half_lung(source)
# target_half = get_half_lung(target)
# cleaned_source_half = lung_isolated_leaf_clean_up(source_half,radius=0.02, principle_weight=[2,1,1], normalize_weights=False)
# # visualize_point_pair(source_half.points, cleaned_source_half.points,
# # source_weight_transform(source_half.weights),
# # source_weight_transform(cleaned_source_half.weights),
# # title1="source", title2="cleaned_source", rgb_on=False)
# #
# # plot_pair_weight_distribution(source_weight_transform(source_half.weights).cpu().squeeze().numpy(),
# # target_weight_transform(target_half.weights).cpu().squeeze().numpy(),
# # use_log=True)
#
# visualize_point_pair(source_half.points, target_half.points,
# source_weight_transform(source_half.weights),
# target_weight_transform(target_half.weights),
# title1="source", title2="target", rgb_on=False)
| uncbiag/shapmagn | shapmagn/experiments/datasets/lung/lung_data_analysis.py | lung_data_analysis.py | py | 28,299 | python | en | code | 94 | github-code | 6 |
[
{
"api_name": "sys.path.insert",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "shapmagn.datasets.data_utils.get_obj",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "shapmagn.global_variable.Shape",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "shapmagn.global_variable.Shape",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.sort",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.sort",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "shapmagn.global_variable.Shape",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "numpy.cumsum",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "numpy.interp",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "shapmagn.global_variable.Shape",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair_overlap",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair_overlap",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair_overlap",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "shapmagn.global_variable.Shape",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "pyvista.PolyData",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "shapmagn.global_variable.shape_type",
"line_number": 579,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "shapmagn.datasets.data_utils.get_obj",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "shapmagn.datasets.data_utils.get_obj",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "shapmagn.datasets.data_utils.read_json_into_list",
"line_number": 630,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 645,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 653,
"usage_type": "attribute"
},
{
"api_name": "shapmagn.utils.visualizer.visualize_point_pair",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 670,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 671,
"usage_type": "call"
}
] |
39635306222 |
from datetime import datetime
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import loader
from django.urls import reverse
from .models import BusinessIdea
# Create your views here.
def list(request):
ideas_list = BusinessIdea.objects.order_by("-publish_date")[:10]
template = loader.get_template('ideas/list.html')
context = {
'ideas_list': ideas_list,
}
return HttpResponse(template.render(context, request))
def idea(request, idea_id):
try:
        idea = BusinessIdea.objects.get(pk=idea_id)
except BusinessIdea.DoesNotExist:
raise Http404("Idea does not exist")
#comments = IdeaComment.objects.filter()
print(idea.__dir__())
return render(request, 'ideas/detail.html', {"idea": idea, "comments": ""})
def idea_new(request):
return render(request, "ideas/idea_new.html")
def idea_new_post(request):
print(request.POST.keys())
try:
username = request.POST['username']
title = request.POST["title"]
body = request.POST["body"]
    except KeyError:
# Redisplay the form.
return render(request, 'ideas/idea_new.html', {
'error_message': "Invalid form.",
})
newIdea = BusinessIdea(
username = username,
title = title,
body = body,
publish_date = datetime.now()
)
newIdea.save()
context = {
"idea": newIdea
}
return HttpResponseRedirect(reverse("ideas:idea", args=(newIdea.id,)))
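# Hypothetical companion sketch (not part of this file): the URLconf these views
# assume, in particular the "ideas:idea" route name used by the redirect in
# idea_new_post(). Route paths and app_name are illustrative guesses.
#
#     # ideas/urls.py
#     from django.urls import path
#     from . import views
#
#     app_name = "ideas"
#     urlpatterns = [
#         path("", views.list, name="list"),
#         path("<int:idea_id>/", views.idea, name="idea"),
#         path("new/", views.idea_new, name="idea_new"),
#         path("new/post/", views.idea_new_post, name="idea_new_post"),
#     ]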
| Gael-Bernard/business_ideas_upm | business_ideas_upm/ideas/views.py | views.py | py | 1,571 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "models.BusinessIdea.objects.order_by",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.BusinessIdea.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.BusinessIdea",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.BusinessIdea.objects.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.BusinessIdea.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "models.BusinessIdea",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "models.BusinessIdea.DoesNotExist",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.BusinessIdea",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.BusinessIdea",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 57,
"usage_type": "call"
}
] |
11403898752 |
from torch.utils.data import Dataset
from transformers import Trainer
from transformers import TrainingArguments
from trainer.callbacks.printer import PrinterCallback
from data_manager.batch_sampler import Batch_Sampler
from model.model_parameters import Model_Parameters
from trainer.tne_config import TNE_Config
import torch
import os
import json
os.environ["WANDB_DISABLED"] = "true"
class TNETrainer():
def __init__(self, model: torch.nn.Module, train_set: Dataset, evaluation_set: Dataset, test_set: Dataset,
config: TNE_Config, hyper_parameters: Model_Parameters) -> None:
# Init Trainer properties
self.model = model
self.config = config
self.prepositions_list = config.prepositions_list
self.num_labels = config.num_labels
#################################################
# Init TNE Model #
#################################################
self.train_set = train_set
self.evaluation_set = evaluation_set
self.test_set = test_set
self.test_output_path = self.config.test_output
self.hyper_parameters = hyper_parameters
self.model = model
#################################################
# Init Training Arguments #
#################################################
training_params = hyper_parameters.training_params
evaluation_params = hyper_parameters.evaluation_params
self.training_args = TrainingArguments(output_dir=config.output_dir,
num_train_epochs=training_params["epochs"],
per_device_train_batch_size=training_params['batch_size'],
per_device_eval_batch_size=evaluation_params['batch_size'],
learning_rate=training_params['learning_rate'],
weight_decay=training_params['weight_decay'],
warmup_steps=training_params['warmup_steps'],
logging_dir=config.logs_dir,
logging_steps=5000, # log & save weights each logging_steps
evaluation_strategy="steps", # evaluate each `logging_steps`
eval_steps=evaluation_params['eval_steps'],
save_strategy="no")
#############################################
# Init Trainer #
#############################################
# Metrics
self.batch_collator = Batch_Sampler(tokenizer=self.config.tokenizer,
device_type=self.config.device)
self.trainer = Trainer(
model=self.model, # TNE model
args=self.training_args, # Training arguments, defined above
train_dataset=self.train_set, # Training set
eval_dataset=self.evaluation_set, # Evaluation set
#compute_metrics=self.metrics.compute_metrics, # Callback that computes metrics of interest
callbacks=[
# a printer callback used to draw a graph showing the
# evaluation accuracy of the model over the epochs in the training.
PrinterCallback
],
data_collator=self.batch_collator,
)
def train(self):
# train the model
self.trainer.train()
def evaluate(self):
# evaluate the model performance
self.trainer.evaluate()
def test(self):
# test the model and create a file with the predicted prepositions.
with open(self.test_output_path, 'w') as outfile:
for sample in self.test_set:
                batch = self.batch_collator(batch=[sample])
predictions = self.model(batch['input'], None)
predictions[predictions == 25] = 0
predictions_json = json.dumps({'predicted_prepositions': predictions.flatten().tolist()})
outfile.write(predictions_json + "\n")
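# Hypothetical usage sketch (not part of the original file): wire the trainer up and
# run the full train/evaluate/test cycle. All arguments (the TNE model, the dataset
# splits, the TNE_Config and Model_Parameters instances) are assumed to be built
# elsewhere in the project.
def run_tne_pipeline(tne_model, train_set, evaluation_set, test_set, config, hyper_parameters):
    trainer = TNETrainer(
        model=tne_model,
        train_set=train_set,
        evaluation_set=evaluation_set,
        test_set=test_set,
        config=config,
        hyper_parameters=hyper_parameters,
    )
    trainer.train()      # fine-tune on the training set
    trainer.evaluate()   # report metrics on the evaluation set
    trainer.test()       # write predicted prepositions to config.test_output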
| ranraboh/TNE_TASK | trainer/tne_trainer.py | tne_trainer.py | py | 4,430 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "trainer.tne_config.TNE_Config",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "model.model_parameters.Model_Parameters",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "model.model_parameters",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "model.model_parameters",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "transformers.TrainingArguments",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "data_manager.batch_sampler.Batch_Sampler",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "transformers.Trainer",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "trainer.callbacks.printer.PrinterCallback",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 90,
"usage_type": "call"
}
] |
16838024238 |
from typing import List
from csvcubed.models.cube import (
Cube,
QbDimension,
ExistingQbDimension,
QbColumn,
CsvColumnUriTemplateMissingError,
QbAttributeLiteral,
CsvColumnLiteralWithUriTemplate,
QbAttribute,
NoDimensionsDefinedError,
)
from csvcubed.models.validationerror import ValidationError
from csvcubed.utils.qb.cube import get_columns_of_dsd_type
from csvcubed.utils.qb.validation.observations import (
validate_observations,
)
def validate_qb_component_constraints(cube: Cube) -> List[ValidationError]:
"""
Validate a :class:`QbCube` to highlight errors in configuration.
:return: A list of :class:`ValidationError <csvcubed.models.validationerror.ValidationError>` s.
"""
errors = _validate_dimensions(cube)
errors += _validate_attributes(cube)
errors += validate_observations(cube)
return errors
def _validate_dimensions(cube: Cube) -> List[ValidationError]:
errors: List[ValidationError] = []
dimension_columns = get_columns_of_dsd_type(cube, QbDimension)
for c in cube.columns:
if isinstance(c, QbColumn) and isinstance(
c.structural_definition, ExistingQbDimension
):
if c.csv_column_uri_template is None:
errors.append(
CsvColumnUriTemplateMissingError(
c.csv_column_title, ExistingQbDimension
)
)
if len(dimension_columns) == 0:
errors.append(NoDimensionsDefinedError())
return errors
def _validate_attributes(cube: Cube) -> List[ValidationError]:
errors: List[ValidationError] = []
for c in cube.columns:
if isinstance(c, QbColumn) and isinstance(c.structural_definition, QbAttribute):
if isinstance(c.structural_definition, QbAttributeLiteral):
if c.csv_column_uri_template is not None:
errors.append(
CsvColumnLiteralWithUriTemplate(
c.csv_column_title,
f"{c.structural_definition.__class__.__name__} "
+ "cannot have a uri_tempate as it holds literal values",
)
)
else:
# Not a QbAttributeLiteral
if (
c.csv_column_uri_template is None
and len(c.structural_definition.new_attribute_values) == 0 # type: ignore
):
errors.append(
CsvColumnUriTemplateMissingError(
c.csv_column_title,
f"{c.structural_definition.__class__.__name__} using existing attribute values",
)
)
return errors
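# Hypothetical usage sketch (not part of the original module): run the component
# validation over an already-built cube and report the problems found. The `cube`
# argument is assumed to come from csvcubed's cube-construction helpers elsewhere.
def report_component_errors(cube: Cube) -> None:
    for error in validate_qb_component_constraints(cube):
        print(f"{type(error).__name__}: {error}")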
| GDonRanasinghe/csvcubed-models-test-5 | csvcubed/csvcubed/utils/qb/validation/cube.py | cube.py | py | 2,817 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "csvcubed.models.cube.Cube",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "csvcubed.utils.qb.validation.observations.validate_observations",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.validationerror.ValidationError",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.cube.Cube",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.validationerror.ValidationError",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "csvcubed.utils.qb.cube.get_columns_of_dsd_type",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "csvcubed.models.cube.QbDimension",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.QbColumn",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.ExistingQbDimension",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.CsvColumnUriTemplateMissingError",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "csvcubed.models.cube.ExistingQbDimension",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.NoDimensionsDefinedError",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.validationerror.ValidationError",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.cube.Cube",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.validationerror.ValidationError",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.cube.QbColumn",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.QbAttribute",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.QbAttributeLiteral",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "csvcubed.models.cube.CsvColumnLiteralWithUriTemplate",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "csvcubed.models.cube.CsvColumnUriTemplateMissingError",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "csvcubed.models.validationerror.ValidationError",
"line_number": 55,
"usage_type": "name"
}
] |
25147617203 |
import errno
import logging as _logging
import socket
import socketserver
import threading
import time
from napalm import utils
# Log
logging = _logging.getLogger("SERVER")
# Temp
# utils.default_logging_setup()
try:
from twisted.internet import reactor
from twisted.internet.protocol import connectionDone, Protocol, ServerFactory
from twisted.protocols.basic import LineReceiver
except ImportError:
logging.warning("There is no Twisted module!")
"""
Conventions:
"raw" - means data with delimiters, not splitted yet.
"data" - str data.
"data_bytes" - bytes data.
Servers and clients operate only with bytes. Protocol converts bytes to str and vice versa.
"""
# Common
class Config:
DELIMITER = b"\x00"
# 1200 - the most optimal max message size to fit IP(?) frame when using TCP
RECV_SIZE = 1200 # 1024 # 4096
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def __init__(self, host="", port=0, protocol_class=None):
self._host = host
self._port = port
if protocol_class:
self.protocol_class = protocol_class
class ServerConfig(Config):
logging = None
pass
class ProtocolFactory:
"""
Single point of creating protocols to be used by any server type.
"""
def __init__(self, config, app=None):
self.config = config
self.app = app
self.protocol_class = config.protocol_class if config and hasattr(config, "protocol_class") else None
self.logging = logging if self.protocol_class and self.protocol_class.is_server_protocol else \
_logging.getLogger("CLIENT")
def dispose(self):
self.logging.debug("ProtocolFactory dispose")
self.config = None
self.app = None
self.protocol_class = None
# self.logging = None
def create(self, send_bytes_method, close_connection_method, address):
if not self.protocol_class:
return None
protocol = self.protocol_class(send_bytes_method, close_connection_method, address, self.config, self.app)
self.logging.debug("ProtocolFactory create new protocol: %s for address: %s", protocol, address)
return protocol
class AbstractServer:
def __init__(self, config, app=None):
self.config = config
self.protocol_factory = ProtocolFactory(config, app)
logging.debug("Server created. %s", self)
def dispose(self):
logging.debug("Server disposing...")
self.stop()
if self.protocol_factory:
self.protocol_factory.dispose()
self.protocol_factory = None
self.config = None
logging.debug("Server disposed")
def start(self):
        raise NotImplementedError
    def stop(self):
        raise NotImplementedError
# Twisted
# TODO try to rename all protocol to protocol (all depend on TwistedHandler)
class TwistedHandler(LineReceiver):
delimiter = b"\x00"
protocol = None
def connectionMade(self):
# Config
self.delimiter = self.factory.config.DELIMITER
# Create app protocol
address = self.transport.getPeer()
self.protocol = self.factory.protocol_factory.create(self.sendLine, self.transport.loseConnection,
(address.host, address.port))
logging.debug("connectionMade for %s protocol: %s", address, self.protocol)
def rawDataReceived(self, data):
# Not used while in line_mode
pass
def lineReceived(self, line):
# logging.debug("dataReceived for %s line: %s", self.protocol, line)
if line:
self.protocol.process_bytes_list((line,))
# def sendLine(self, line):
# logging.debug("sendData for %s line: %s", self.protocol, line)
# super().sendLine(line)
def connectionLost(self, reason=connectionDone):
logging.debug("connectionLost for %s reason: %s", self.protocol, reason)
self.protocol.dispose()
self.protocol = None
class TwistedTCPServer(AbstractServer):
factory = None
port = None
def __init__(self, config, app=None):
super().__init__(config, app)
self.factory = ServerFactory()
self.factory.protocol = TwistedHandler
# Custom references
self.factory.config = config
self.factory.protocol_factory = self.protocol_factory
self.started = False
self.__started_lock = threading.RLock()
def dispose(self):
super().dispose()
if self.factory:
self.factory.config = None
self.factory.protocol = None
self.factory.protocol_factory = None
self.factory = None
def start(self):
self.__started_lock.acquire()
if self.started:
logging.warning("Server is already running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
logging.debug("Server starting... address: %s", (self.config.host, self.config.port))
self.started = True
self.__started_lock.release()
self.port = reactor.listenTCP(self.config.port, self.factory)
if not reactor.running:
reactor.run()
logging.debug("Server started")
def stop(self):
self.__started_lock.acquire()
if not self.started:
logging.warning("Server is not running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
logging.debug("Server stopping...")
self.started = False
self.__started_lock.release()
if self.port:
# deferred = self.port.stopListening()
# if deferred:
# event = threading.Event()
# event.clear()
#
# def event_set():
# print("Waiting finished")
# event.set()
# deferred.addCallback(event_set)
# print("Waiting while listening stopping...", deferred)
# event.wait()
# print("Listening stopped")
self.port.loseConnection()
try:
self.port.connectionLost(None)
except Exception as error:
# Bug in Twisted: sometimes AttributeError ('Port' object has no attribute 'socket') occurs
# print("ERROR", error)
pass
self.port = None
# -reactor.stop()
# reactor.crash()
logging.debug("Server stopped")
# print("Press Enter to exit...")
# input()
# # Needed to save lobby state using atexit.register() in app
# sys.exit()
# Threaded
class ThreadedTCPHandler(socketserver.BaseRequestHandler):
# static
abort = False
buffer_bytes = b""
# is_first = True
config = None
protocol = None
def setup(self):
threading.current_thread().name += "-srv-handler"
self.config = self.server.config
self.protocol = self.server.protocol_factory.create(self.send_bytes, self.request.close,
self.client_address)
logging.debug("connectionMade for %s protocol: %s", self.client_address, self.protocol)
def finish(self):
logging.debug("connectionLost for %s", self.protocol)
self.protocol.dispose()
self.protocol = None
self.config = None
def send_bytes(self, data_bytes):
# logging.debug("sendData for %s line: %s", self.protocol, data_bytes)
self.request.sendall(data_bytes + self.config.DELIMITER)
def handle(self):
while not self.server.abort:
# Read
is_data = True
data_bytes = None
while not self.server.abort and is_data and self.config.DELIMITER not in self.buffer_bytes:
try:
data_bytes = self.request.recv(self.config.RECV_SIZE)
is_data = bool(data_bytes)
self.buffer_bytes += data_bytes
except socket.error as error:
                    # Note: current buffer won't be processed, but it is usually empty in such cases
logging.debug(" (connectionLost (abort) for %s reason: %s)", self.protocol, error)
return
# Parse bytes
# b"command1##command2##\x00command3##\x00" -> [b"command1##command2##", b"command3##", b""]
# b"1||param||##5||param||##\x0010||param||##\x00" ->
# [b"1||param||##5||param||##", b"10||param||##", b""]
if self.buffer_bytes:
# print("TEMP SERVER config:", self.server and self.config)
data_bytes_list = self.buffer_bytes.split(self.config.DELIMITER)
self.buffer_bytes = data_bytes_list.pop()
# Process
try:
# (Try-except: because send method could be invoked during processing)
if self.protocol and data_bytes_list:
self.protocol.process_bytes_list(data_bytes_list)
# (Don't use socket.error because it causes StopIteration, which would not be caught)
# except socket.error as error:
except Exception as error:
logging.debug(" (connectionLost for %s reason: %s)", self.protocol, error)
return
if not data_bytes:
if not self.server.abort:
reason = "(Empty data received: %s)" % data_bytes
logging.debug(" (connectionLost for %s reason: %s)", self.protocol, reason)
return
class ThreadedTCPServer(AbstractServer):
server = None
def __init__(self, config, app=None):
super().__init__(config, app)
self.started = False
self.__started_lock = threading.RLock()
self.__shutdown_event = threading.Event()
self.__shutdown_event.set()
# def dispose(self):
# super().dispose()
def start(self):
if not self.config:
logging.error("Server is not initialized")
return
self.__started_lock.acquire()
if self.started:
logging.warning("Server is already running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
# Create and start server
address = (self.config.host, self.config.port)
logging.debug("Server starting... address: %s", address)
self.started = True
self.__started_lock.release()
self.server = socketserver.ThreadingTCPServer(address, ThreadedTCPHandler)
self.server.protocol_factory = self.protocol_factory
self.server.config = self.config
self.server.abort = False
logging.debug("Server started")
self.__shutdown_event.clear()
try:
self.server.serve_forever()
except KeyboardInterrupt as error:
logging.info("^C KeyboardInterrupt", error)
# Here we shutting down the server
logging.debug("Server shutting down...")
# (Abort other threads)
self.server.abort = True
self.server.server_close()
self.server.protocol_factory = None
self.server.config = None
self.server = None
logging.debug("Server shut down")
self.__shutdown_event.set()
# print("Press Enter to exit...")
# input()
# # Needed to save lobby state using atexit.register() in app
# sys.exit()
def stop(self):
self.__started_lock.acquire()
if not self.started:
logging.warning("Server is not running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
# Preventing
logging.debug("Server stopping... address: %s", (self.config.host, self.config.port))
self.started = False
self.__started_lock.release()
t = time.time()
self.server.shutdown()
self.__shutdown_event.wait()
logging.debug("Server stopped in %f sec (95%% of time is exiting from serve_forever())", time.time() - t)
# Non-blocking
class NonBlockingTCPServer(AbstractServer):
_sock = None
def __init__(self, config, app=None):
super().__init__(config, app)
# (Needed for walking through all connections on each tick and receiving available data)
self._protocol_list = []
self._request_by_protocol = {}
self._buffer_by_protocol = {}
self._abort = False
self.started = False
self.__started_lock = threading.RLock()
self.__shutdown_event = threading.Event()
self.__shutdown_event.set()
def start(self):
if not self.config:
logging.warning("Server is not initialized")
return
address = (self.config.host, self.config.port)
logging.debug("Server starting... address: %s", address)
self.__started_lock.acquire()
if self.started:
logging.warning("Server is already running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
self.started = True
self.__started_lock.release()
# (If restarting)
self._abort = False
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind(address)
self._sock.listen()
self._sock.setblocking(0)
logging.debug("Server started")
self.__shutdown_event.clear()
try:
self._workflow(self._sock)
except KeyboardInterrupt as error:
logging.debug("^C KeyboardInterrupt %s", error)
logging.debug("Server shutting down...")
# self._abort = True
try:
self._sock.shutdown(socket.SHUT_RDWR)
except socket.error as error:
logging.error("Error while shutting down: %s", error)
self._sock.close()
self._sock = None
# (list() needed to make a copy)
for protocol in list(self._protocol_list):
protocol.dispose()
self._protocol_list.clear()
self._request_by_protocol.clear()
self._buffer_by_protocol.clear()
logging.debug("Server shut down")
# logging.debug("Server stopped")
self.__shutdown_event.set()
# (For standalone. Bad for tests)
# print("Press Enter to exit...")
# input()
# # Needed to save lobby state using atexit.register() in app
# sys.exit()
def stop(self):
logging.debug("Server stopping...")
self.__started_lock.acquire()
if not self.started:
logging.warning("Server is not running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
# If was started, but yet is not stopping
self.started = False
self.__started_lock.release()
self._abort = True
self.__shutdown_event.wait()
logging.debug("Server stopped")
def _process_disconnect(self, protocol, error):
logging.debug("connectionLost for %s reason: %s", protocol, error)
protocol.dispose()
self._protocol_list.remove(protocol)
if protocol in list(self._request_by_protocol):
del self._request_by_protocol[protocol]
if protocol in list(self._buffer_by_protocol):
del self._buffer_by_protocol[protocol]
def _workflow(self, sock):
while not self._abort:
# print("SERVER. While...")
# Connect
request, address = None, None
try:
request, address = sock.accept()
# socket.error (real error is [WinError 10035])
except Exception as error:
# print("accept error:", error)
# There is no new connections - skip
pass
if request:
# New connection
def send_bytes(data_bytes):
# logging.debug("sendData for %s line: %s", self.protocol, data_bytes)
request.sendall(data_bytes + self.config.DELIMITER)
# Create protocol
protocol = self.protocol_factory.create(send_bytes, request.close, address)
logging.debug("connectionMade for %s protocol: %s", address, protocol)
self._protocol_list.append(protocol)
self._request_by_protocol[protocol] = request
# Walk through all connections looking for new data to receive
i = 0
for protocol in self._protocol_list:
i += 1
request = self._request_by_protocol[protocol]
# Read
                buffer_bytes = self._buffer_by_protocol.get(protocol, b"")
is_data = True
data_bytes = None
while is_data:
try:
data_bytes = request.recv(self.config.RECV_SIZE)
is_data = bool(data_bytes)
buffer_bytes += data_bytes
# print("SERVER. recv data_bytes:", data_bytes, "buffer_bytes:", buffer_bytes)
# socket.error
except Exception as error:
# (break) is_data = False
# print("SERVER. Error (recv)", error)
if not hasattr(error, "errno") or error.errno != errno.EWOULDBLOCK:
self._process_disconnect(protocol, error)
# Process next connection for both disconnect and no data received now
break
if not data_bytes:
self._process_disconnect(protocol, "(Empty data received: %s)" % data_bytes)
if not buffer_bytes:
continue
# Parse bytes
data_bytes_list = buffer_bytes.split(self.config.DELIMITER)
                self._buffer_by_protocol[protocol] = data_bytes_list.pop()
# Process
try:
# (Try-except: because send method could be invoked during processing)
if protocol and data_bytes_list:
logging.debug("dataReceived for %s line: %s", protocol, buffer_bytes)
protocol.process_bytes_list(data_bytes_list)
# socket.error
except Exception as error:
self._process_disconnect(protocol, error)
break
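# --- Usage sketch (illustrative only; EchoProtocol is a hypothetical protocol class) ---
# Any class with the constructor signature used by ProtocolFactory.create() and
# process_bytes_list()/dispose() methods can be plugged into ServerConfig.
#
# class EchoProtocol:
#     is_server_protocol = True
#
#     def __init__(self, send_bytes, close_connection, address, config, app):
#         self.send_bytes = send_bytes
#
#     def process_bytes_list(self, data_bytes_list):
#         for data_bytes in data_bytes_list:
#             self.send_bytes(data_bytes)  # echo every framed message back
#
#     def dispose(self):
#         pass
#
# config = ServerConfig(host="", port=8000, protocol_class=EchoProtocol)
# server = ThreadedTCPServer(config)
# server.start()  # blocks in serve_forever() until stop() is called from another thread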
|
markelov-alex/py-sockets
|
napalm/socket/server.py
|
server.py
|
py
| 18,945 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "twisted.protocols.basic.LineReceiver",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "twisted.internet.protocol.connectionDone",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "twisted.internet.protocol.ServerFactory",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "threading.RLock",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor.listenTCP",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "twisted.internet.reactor.running",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "twisted.internet.reactor",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "twisted.internet.reactor.run",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "socketserver.BaseRequestHandler",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "threading.current_thread",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "socket.error",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "threading.RLock",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "socketserver.ThreadingTCPServer",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "threading.RLock",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_REUSEADDR",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "socket.SHUT_RDWR",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 424,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "errno.EWOULDBLOCK",
"line_number": 513,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 531,
"usage_type": "call"
}
] |
71087029308
|
"""Simple wrapper for app"""
import json
from rich.console import Console
from typing import List
import requests
from src.utils import Oracles
class FlaskAppClient:
ERROR_KEY = "error"
TRACEBACK_KEY = "traceback"
def __init__(self, base_url="http://127.0.0.1:5000"):
self.base_url = base_url
self.console = Console()
def _handle_response(self, response):
try:
response_data = response.json()
except json.JSONDecodeError:
self.console.print("[red]Failed to parse server response as JSON[/red]")
self.console.print("Response from server: " + str(response))
            response.raise_for_status()  # This will raise an HTTPError if the HTTP request returned an unsuccessful status code.
            raise RuntimeError("Server returned a non-JSON response with a successful status code")
if response.status_code == 200:
return response_data
else:
error = response_data.get(self.ERROR_KEY, 'Unknown error')
tb = response_data.get(self.TRACEBACK_KEY, None)
self.console.print(f"[red]Server error: {error}[/red]")
if tb:
self.console.print(f"[yellow]{tb}[/yellow]")
raise RuntimeError(f"Server error: {error}")
def all_results(self):
response = requests.post(f"{self.base_url}/all_results", json={})
return self._handle_response(response)
def all_scores(self, user_token):
payload = {
"token": user_token
}
response = requests.post(f"{self.base_url}/all_scores", json=payload)
return self._handle_response(response)
def score_compounds_and_update_leaderboard(self, compounds, oracle_name, user_token):
payload = {
"compounds": ",".join(compounds),
"oracle_name": oracle_name,
"token": user_token
}
response = requests.post(f"{self.base_url}/score_compounds_and_update_leaderboard", json=payload)
return self._handle_response(response)
# Usage Example:
if __name__ == "__main__":
client = FlaskAppClient()
token = "test-0"
# Example for scoring compounds
compounds = ["CC", "CCC"]
oracle_name = "DRD2"
response = client.score_compounds_and_update_leaderboard(compounds, oracle_name, token)
print(response)
# Example of error handling
compounds = ["Cxxxxx"]
oracle_name = "DRD2"
response = client.score_compounds_and_update_leaderboard(compounds, oracle_name, token)
print(response)
|
molecule-one/mlinpl-23-workshops
|
src/server_wrapper.py
|
server_wrapper.py
|
py
| 2,462 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "rich.console.Console",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 52,
"usage_type": "call"
}
] |
43431205524
|
import datetime
import uuid
import logging
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import pandas as pd
import sys
import pprint
import traceback
from core.scraper.scraper import Scraper
from core.db.db_helper import DbHelper
from common.constants import THREAD_NO, LARGE_CHUNK, BULK_CHUNK
from common.protobuf_to_dict.protobuf_to_dict.convertor import protobuf_to_dict
from common.app_object import App
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
level=logging.INFO)
pp = pprint.PrettyPrinter(indent=4)
class Updater:
"""
Keeps iterating over the database till the script is interrupted and
collecting meta-data for apps that have previously been scraped.
"""
def __init__(self, input_file=None):
self.__db_helper = DbHelper()
self.input_file = input_file
# ***************** #
# updating all related functions
# ***************** #
def update_apps(self):
"""
Uses bulk scraping to update apps much faster than before
"""
if self.input_file is None:
# dicts representing each app and info e.g. current version code, uuid, etc.
apps = self.__db_helper.get_package_names_to_update(0)
else:
apps = pd.read_csv(self.input_file)["packageName"].tolist()
self.s = Scraper()
app_names = []
app_data = []
removed_apps = []
total_apps_no = len(apps)
logger.info("Starting bulk update with {} apps...".format(total_apps_no))
with ThreadPoolExecutor(max_workers=THREAD_NO) as executor:
res = executor.map(self.update_all_thread_worker,
range(0, total_apps_no), apps)
counter = 0
for future in res:
if future is not None:
app_names.append(future[0])
if future[1] is not None and future[2] is not None:
app_data.append((future[1], future[2]))
else:
removed_apps.append(future[0])
counter += 1
if counter % LARGE_CHUNK == 0:
logger.info("updated {} to {} out of {}".format(
counter - LARGE_CHUNK, counter, total_apps_no))
if counter % (BULK_CHUNK * 10) == 0:
logger.info("updating {} apps as removed".format(len(removed_apps)))
self.__db_helper.update_apps_as_removed(removed_apps)
removed_apps = []
try:
logger.info("inserting {} updated apps to db...".format(len(app_data)))
self.__db_helper.insert_apps_into_db(app_data)
app_data = []
except Exception as e:
logger.error("db insertion failed - {}".format(e))
print(traceback.format_exc())
logger.error(traceback.format_exc())
logger.info("completed all out of {}".format(total_apps_no))
logger.info("updating {} apps as removed".format(len(removed_apps)))
self.__db_helper.update_apps_as_removed(removed_apps)
logger.info("inserting {} updated apps to db...".format(len(app_data)))
self.__db_helper.insert_apps_into_db(app_data)
self.__db_helper.update_apps_as_not_removed(app_names)
self.__db_helper.update_date_last_scraped(app_names,
datetime.datetime.utcnow().strftime("%Y%m%dT%H%M"))
def update_all_thread_worker(self, index, app_name):
# bulk scrape to check for updates
s = self.s
"""
try:
"""
metadata = s.get_metadata_for_apps([app_name], bulk=False)
if metadata is None:
# app removed
return (app_name, None, None)
if len(list(metadata)) == 0:
return (app_name, None, None)
new_info, new_detail = list(metadata)[0]
num_updated = 0
if new_info is None:
# app is removed
logger.error("app {} has been removed".format(app_name))
return (app_name, None, None)
if new_info.packageName != app_name: # TODO why
logger.error("mismatching package names")
return
if new_info.versionCode is None or new_info.uploadDate is None:
# TODO add crawler code here to fix this, ignore for now
logger.warning("{} - null versionCode or uploadDate, ignoring".format(app_name))
return
return (app_name, new_info, new_detail)
"""
if new_info.versionCode is not None:
info_vc = new_info.versionCode
details_dict = protobuf_to_dict(new_detail)
if info_vc != details_dict["details"]["appDetails"]["versionCode"]:
logger.error("VERSION MISMATCH for {}".format(app_name))
return
# check version code to see if app is updated
updated = self.__db_helper.check_app_to_update(app_name, new_info.versionCode)
else:
# if not provided just assume is updated
updated = True
if updated:
return (app_name, new_info, new_detail)
else:
return None
"""
"""
except Exception as e:
logger.error("{} - {}".format(app_name, str(e)))
"""
"""
if __name__ == '__main__':
while True:
try:
up = Updater()
up.update_apps()
except KeyboardInterrupt:
logger.warning("Updater interrupted by user")
"""
|
CMUChimpsLab/playstore-scraper
|
core/scraper/updater.py
|
updater.py
|
py
| 5,745 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "core.db.db_helper.DbHelper",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "core.scraper.scraper.Scraper",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "common.constants.THREAD_NO",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "common.constants.LARGE_CHUNK",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "common.constants.LARGE_CHUNK",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "common.constants.BULK_CHUNK",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "traceback.format_exc",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 87,
"usage_type": "attribute"
}
] |
10906525746
|
import argparse
import time
import pika
from pika.exceptions import (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
)
class Logger:
LOG_EXCHANGE = "logs"
LOG_EXCHANGE_TYPE = "topic"
def __init__(self, url, routing_keys):
connection = pika.BlockingConnection(pika.URLParameters(url))
channel = connection.channel()
channel.exchange_declare(
exchange=self.LOG_EXCHANGE,
exchange_type=self.LOG_EXCHANGE_TYPE,
durable=True,
)
# We declare a transient queue because we don't want to fill-up rabbitmq
# with logs if the logger is down
result = channel.queue_declare("", exclusive=True)
queue_name = result.method.queue
for key in routing_keys:
channel.queue_bind(exchange="logs", queue=queue_name, routing_key=key)
        # Logger queue is auto-ack for minimum overhead as we don't mind losing some
# messages (very rare as we rarely fail)
channel.basic_consume(
queue=queue_name, on_message_callback=self.callback, auto_ack=True
)
self._channel = channel
self._connection = connection
def callback(self, ch, method, properties, body):
print("[{}] {}".format(method.routing_key, body.decode("utf-8")))
def run(self):
try:
self._channel.start_consuming()
except KeyboardInterrupt:
return True
except (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
):
return False
finally:
if not self._connection.is_closed:
self._connection.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Display selected logs in realtime on the given broker"
)
parser.add_argument("amqp_url", help="URL of the broker, including credentials")
parser.add_argument(
"--filter",
help="Log patterns to subscribe to (default to all)",
nargs="*",
default=["#"],
)
args = parser.parse_args()
expected_stop = False
print("Ctrl-C to quit.")
print("Subcribing to logs:", args.filter)
while not expected_stop:
try:
logger = Logger(args.amqp_url, args.filter)
except AMQPConnectionError:
print("could not connect; retry…")
time.sleep(2)
continue
print("connected!")
expected_stop = logger.run()
print("bye!")
|
allo-media/eventail
|
scripts/logger.py
|
logger.py
|
py
| 2,599 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pika.BlockingConnection",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pika.URLParameters",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pika.exceptions.ChannelClosed",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pika.exceptions.ConnectionClosed",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "pika.exceptions.AMQPConnectionError",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "pika.exceptions.AMQPHeartbeatTimeout",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pika.exceptions.AMQPConnectionError",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 83,
"usage_type": "call"
}
] |
29209651660
|
import os
from pathlib import Path
def correct_content(req):
with open(req, "rb") as fp:
content = fp.read()
try:
if b"\x00" in content:
raise ValueError()
content = content.decode("utf-8")
except (UnicodeDecodeError, ValueError):
content = (
content.replace(b"\xff", b"")
.replace(b"\xfe", b"")
.replace(b"\x00", b"")
.decode("utf-8")
)
with open(req, "w") as fp:
fp.write(content)
return content
def main():
root = Path("src", "tests4py", "projects", "resources")
assert root.exists() and root.is_dir(), f"Wrong cwd {Path.cwd()}"
for p in os.listdir(root):
project = root / p
default_req = project / "requirements.txt"
default_content = ""
if default_req.exists():
default_content = correct_content(default_req)
if p != "__pycache__" and project.is_dir():
reqs = dict()
for b in os.listdir(project):
bug = project / b
if bug.is_dir():
req = bug / "requirements.txt"
if req.exists():
print(req)
reqs[b] = correct_content(req)
elif default_req.exists():
reqs[b] = default_content
if len(reqs) > 0:
count = dict()
for r in reqs.values():
if r in count:
count[r] += 1
else:
count[r] = 1
r = max(count, key=count.get)
if count[r] > 1:
with open(default_req, "w") as fp:
fp.write(r)
for b in reqs:
if r == reqs[b] and (project / b / "requirements.txt").exists():
os.remove(project / b / "requirements.txt")
if __name__ == "__main__":
main()
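# Illustrative note (not part of the original script): correct_content() strips
# UTF-16 BOM and NUL bytes so the requirements file decodes as UTF-8, e.g.
#   b"\xff\xfef\x00l\x00a\x00s\x00k\x00"  ->  "flask"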
|
smythi93/Tests4Py
|
requirements.py
|
requirements.py
|
py
| 2,015 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 57,
"usage_type": "call"
}
] |
21247797774
|
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import click
FILENAME_DATA = "data.csv"
FILENAME_TARGET = "target.csv"
FILENAME_TRAIN_X = "X_train.csv"
FILENAME_TRAIN_Y = "y_train.csv"
FILENAME_TEST_X = "X_test.csv"
FILENAME_TEST_Y = "y_test.csv"
@click.command("split_data")
@click.option("--input-dir")
@click.option("--output-dir")
@click.option("--size", type=float)
@click.option("--random-state", type=int)
def split_data(input_dir: str, output_dir: str, size: float, random_state: int):
path_data = os.path.join(input_dir, FILENAME_DATA)
features_df = pd.read_csv(path_data)
X_train, X_test = train_test_split(features_df, test_size=size, random_state=random_state)
path_target = os.path.join(input_dir, FILENAME_TARGET)
target_df = pd.read_csv(path_target)
y_train, y_test = train_test_split(target_df, test_size=size, random_state=random_state)
os.makedirs(output_dir, exist_ok=True)
X_train.to_csv(os.path.join(output_dir, FILENAME_TRAIN_X), index=False)
X_test.to_csv(os.path.join(output_dir, FILENAME_TEST_X), index=False)
y_train.to_csv(os.path.join(output_dir, FILENAME_TRAIN_Y), index=False)
y_test.to_csv(os.path.join(output_dir, FILENAME_TEST_Y), index=False)
if __name__ == '__main__':
split_data()
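# Example invocation (hypothetical paths) inside the airflow-split image:
#   python split_data.py --input-dir /data/raw --output-dir /data/split \
#       --size 0.2 --random-state 42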
|
made-mlops-2022/alexey_sklyannyy
|
airflow_ml_dags/images/airflow-split/split_data.py
|
split_data.py
|
py
| 1,308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "click.command",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 20,
"usage_type": "call"
}
] |
23800674981
|
from pynput.keyboard import Key,Listener
keys=[]
def on_press(key):
try:
key=str(key)
if(key=='Key.enter'):
key='\n'
elif(key=='Key.space'):
key=' '
elif(key=='Key.alt'):
key=' alt '
elif(key=='Key.ctrl'):
key=' ctrl '
elif(key=='Key.backspace'):
key=' backspace '
        elif(key=='Key.shift'):
key=' shift '
        # Use a context manager so the log file is always closed, even if the write fails
        with open('a.txt','a') as f:
            key=key.strip('\'')
            f.write(key)
    except Exception as e:
        print(e)
#print("{0} pressed".format(key))
#def on_release(key):
# if(key==Key.esc):
# return False
try:
with Listener(on_press=on_press) as listener:
listener.join()
except:
print('\n...')
|
prajwalcbk/tools
|
keylogger/3.py
|
3.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pynput.keyboard.Key",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.Listener",
"line_number": 34,
"usage_type": "call"
}
] |
36396554295
|
"""
Compare catalogs of candidates and benchmarks.
"""
from __future__ import annotations
# __all__ = ['*']
__author__ = "Fernando Aristizabal"
from typing import Iterable, Optional, Callable, Tuple
import os
import pandas as pd
from rioxarray import open_rasterio as rxr_or
import xarray as xr
import dask.dataframe as dd
def catalog_compare(
candidate_catalog: pd.DataFrame | dd.DataFrame,
benchmark_catalog: pd.DataFrame | dd.DataFrame,
map_ids: str | Iterable[str],
how: str = "inner",
on: Optional[str | Iterable[str]] = None,
left_on: Optional[str | Iterable[str]] = None,
right_on: Optional[str | Iterable[str]] = None,
suffixes: tuple[str, str] = ("_candidate", "_benchmark"),
merge_kwargs: Optional[dict] = None,
open_kwargs: Optional[dict] = None,
compare_type: str | Callable = "continuous",
compare_kwargs: Optional[dict] = None,
agreement_map_field: Optional[str] = None,
agreement_map_write_kwargs: Optional[dict] = None,
) -> pd.DataFrame | dd.DataFrame:
"""
Compare catalogs of candidate and benchmark maps.
Parameters
----------
candidate_catalog : pandas.DataFrame | dask.DataFrame
Candidate catalog.
benchmark_catalog : pandas.DataFrame | dask.DataFrame
Benchmark catalog.
map_ids : str | Iterable of str
Column name(s) where maps or paths to maps occur. If str is given, then the same value should occur in both catalogs. If Iterable[str] is given of length 2, then the column names where maps are will be in [candidate, benchmark] respectively.
The columns corresponding to map_ids should have either str, xarray.DataArray, xarray.Dataset, rasterio.io.DatasetReader, rasterio.vrt.WarpedVRT, or os.PathLike objects.
how : str, default = "inner"
Type of merge to perform. See pandas.DataFrame.merge for more information.
on : str | Iterable of str, default = None
Column(s) to join on. Must be found in both catalogs. If None, and left_on and right_on are also None, then the intersection of the columns in both catalogs will be used.
left_on : str | Iterable of str, default = None
Column(s) to join on in left catalog. Must be found in left catalog.
right_on : str | Iterable of str, default = None
Column(s) to join on in right catalog. Must be found in right catalog.
suffixes : tuple of str, default = ("_candidate", "_benchmark")
Suffixes to apply to overlapping column names in candidate and benchmark catalogs, respectively. Length two tuple of strings.
    merge_kwargs : dict, default = None
        Keyword arguments to pass to pandas.DataFrame.merge.
    open_kwargs : dict, default = None
        Keyword arguments to pass to rioxarray.open_rasterio when loading maps from file.
compare_type : str | Callable, default = "continuous"
Type of comparison to perform. If str, then must be one of {"continuous", "categorical", "probabilistic"}. If Callable, then must be a function that takes two xarray.DataArray or xarray.Dataset objects and returns a tuple of length 2. The first element of the tuple must be an xarray.DataArray or xarray.Dataset object representing the agreement map. The second element of the tuple must be a pandas.DataFrame object representing the metrics.
compare_kwargs : dict, default = None
Keyword arguments to pass to the compare_type function.
agreement_map_field : str, default = None
Column name to write agreement maps to. If None, then agreement maps will not be written to file.
agreement_map_write_kwargs : dict, default = None
Keyword arguments to pass to xarray.DataArray.rio.to_raster when writing agreement maps to file.
Raises
------
ValueError
If map_ids is not str or Iterable of str.
If compare_type is not str or Callable.
If compare_type is str and not one of {"continuous", "categorical", "probabilistic"}.
NotImplementedError
If compare_type is "probabilistic".
Returns
-------
pandas.DataFrame | dask.DataFrame
Agreement catalog.
"""
# unpack map_ids
if isinstance(map_ids, str):
candidate_map_ids, benchmark_map_ids = map_ids, map_ids
elif isinstance(map_ids, Iterable):
candidate_map_ids, benchmark_map_ids = map_ids
else:
raise ValueError("map_ids must be str or Iterable of str")
# set merge_kwargs to empty dict if None
if merge_kwargs is None:
merge_kwargs = dict()
# create agreement catalog
agreement_catalog = candidate_catalog.merge(
benchmark_catalog,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
suffixes=suffixes,
**merge_kwargs,
)
def compare_row(
row,
compare_type: str | Callable,
compare_kwargs: dict,
open_kwargs: dict,
agreement_map_field: str,
agreement_map_write_kwargs: dict,
) -> Tuple[xr.DataArray | xr.Dataset, pd.DataFrame]:
"""Compares catalog and benchmark maps by rows"""
def loadxr(map, open_kwargs):
"""load xarray object if not already"""
return (
map
if isinstance(map, (xr.DataArray, xr.Dataset))
else rxr_or(map, **open_kwargs)
)
# load maps
candidate_map = loadxr(row[candidate_map_ids + suffixes[0]], open_kwargs)
benchmark_map = loadxr(row[benchmark_map_ids + suffixes[1]], open_kwargs)
# set compare_kwargs to empty dict if None
if compare_kwargs is None:
compare_kwargs = dict()
# set agreement_map_write_kwargs to empty dict if None
if agreement_map_write_kwargs is None:
agreement_map_write_kwargs = dict()
if isinstance(compare_type, str):
if compare_type == "categorical":
results = candidate_map.gval.categorical_compare(
benchmark_map, **compare_kwargs
)
# results is a tuple of length 3 or 4
# agreement_map, crosstab_df, metrics_df, attrs_df = results
# where attrs_df is optional
agreement_map, metrics_df = results[0], results[2]
elif compare_type == "continuous":
results = candidate_map.gval.continuous_compare(
benchmark_map, **compare_kwargs
)
# results is a tuple of length 2 or 3
# agreement_map, metrics_df, attrs_df = results
# where attrs_df is optional
agreement_map, metrics_df = results[:2]
elif compare_type == "probabilistic":
raise NotImplementedError(
"probabilistic comparison not implemented yet"
)
else:
raise ValueError(
"compare_type of type str must be one of {'continuous', 'categorical', 'probabilistic'}"
)
elif isinstance(compare_type, Callable):
agreement_map, metrics_df = compare_type(
candidate_map, benchmark_map, **compare_kwargs
)
else:
raise ValueError("compare_type must be str or Callable")
# write agreement map to file
if (agreement_map_field is not None) & isinstance(
agreement_map, (xr.DataArray, xr.Dataset)
):
if isinstance(row[agreement_map_field], (str, os.PathLike)):
agreement_map.rio.to_raster(
row[agreement_map_field], **agreement_map_write_kwargs
)
return metrics_df
# make kwargs for dask apply
if isinstance(agreement_catalog, dd.DataFrame):
dask_kwargs = {"meta": ("output", "f8")}
else:
dask_kwargs = {}
# set open_kwargs to empty dict if None
if open_kwargs is None:
open_kwargs = dict()
# apply compare_row to each row of agreement_catalog
metrics_df = agreement_catalog.apply(
compare_row,
axis=1,
**dask_kwargs,
compare_type=compare_type,
open_kwargs=open_kwargs,
compare_kwargs=compare_kwargs,
agreement_map_field=agreement_map_field,
agreement_map_write_kwargs=agreement_map_write_kwargs,
)
def nested_merge(i, sub_df) -> pd.DataFrame:
"""Duplicated agreement row for each row in sub_df"""
try:
agreement_row = agreement_catalog.iloc[i].to_frame().T
except NotImplementedError:
agreement_row = agreement_catalog.loc[agreement_catalog.index == i]
sub_df.index = [i] * len(sub_df)
return agreement_row.join(sub_df)
# merge agreement_catalog with metrics_df
if isinstance(metrics_df, dd.Series):
return dd.concat(
[nested_merge(i, sub_df) for i, sub_df in enumerate(metrics_df)]
).reset_index(drop=True)
if isinstance(metrics_df, pd.Series):
return pd.concat(
[nested_merge(i, sub_df) for i, sub_df in enumerate(metrics_df)]
).reset_index(drop=True)
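# Usage sketch (illustrative only; column names and file paths are hypothetical):
#
#   import pandas as pd
#   candidate = pd.DataFrame({"region": ["a"], "map_file": ["candidate_a.tif"]})
#   benchmark = pd.DataFrame({"region": ["a"], "map_file": ["benchmark_a.tif"]})
#   agreement_catalog = catalog_compare(
#       candidate, benchmark,
#       map_ids="map_file", on="region", compare_type="continuous",
#   )
#   # one row per merged catalog entry, joined with its continuous metrics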
|
NOAA-OWP/gval
|
src/gval/catalogs/catalogs.py
|
catalogs.py
|
py
| 9,027 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe.DataFrame",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe.DataFrame",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "typing.Callable",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "xarray.DataArray",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "xarray.Dataset",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "rioxarray.open_rasterio",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 167,
"usage_type": "argument"
},
{
"api_name": "xarray.DataArray",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "xarray.Dataset",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "os.PathLike",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "xarray.DataArray",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "xarray.Dataset",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe.DataFrame",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe.Series",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "dask.dataframe.concat",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "dask.dataframe",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "pandas.Series",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe.DataFrame",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "dask.dataframe",
"line_number": 33,
"usage_type": "name"
}
] |
30138290765
|
# !/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2020
# All rights reserved
# @Author: 'Wu Dong <[email protected]>'
# @Time: '2020-04-09 14:39'
""" 演示自定义响应类
"""
# sys
import json
# 3p
from flask import Flask
from pre_request import BaseResponse
from pre_request import pre, Rule
class CustomResponse(BaseResponse):
def __call__(self, fuzzy=False, formatter=None, error=None):
"""
        :type error: error
:return:
"""
result = {
"code": error.code,
"rst": {}
}
from flask import make_response # pylint: disable=import-outside-toplevel
response = make_response(json.dumps(result))
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
app = Flask(__name__)
app.config["TESTING"] = True
filter_params = {
"email": Rule(email=True)
}
@app.route("/email", methods=['get', 'post'])
@pre.catch(filter_params)
def email_resp_handler(params):
""" 测试邮件验证
"""
return str(params)
if __name__ == "__main__":
pre.add_response(CustomResponse)
resp = app.test_client().get("/email", data={
"email": "wudong@@eastwu.cn"
})
print(resp.get_data(as_text=True))
|
Eastwu5788/pre-request
|
examples/example_flask/example_response.py
|
example_response.py
|
py
| 1,281 |
python
|
en
|
code
| 55 |
github-code
|
6
|
[
{
"api_name": "pre_request.BaseResponse",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pre_request.Rule",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pre_request.pre.catch",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pre_request.pre",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pre_request.pre.add_response",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pre_request.pre",
"line_number": 53,
"usage_type": "name"
}
] |
14594653005
|
import tensorflow as tf
import pathlib
import os
import cv2
import numpy as np
import tqdm
import argparse
class TFRecordsSeg:
def __init__(self,
image_dir="/datasets/custom/cityscapes",
label_dir="/datasets/custom/cityscapes",
tfrecord_path="data.tfrecords",
classes=34,
img_pattern="*.png",
label_pattern="*.png"):
"""
        :param image_dir: directory containing the input images
        :param label_dir: directory containing the segmentation label images
        :param tfrecord_path: output path of the tfrecords file to write
"""
# self.data_dir = data_dir
# self.labels_dir = os.path.join(data_dir, "gtFine/{}".format(split))
# self.image_dir = os.path.join(data_dir, "leftImg8bit/{}".format(split))
self.image_dir = image_dir
self.labels_dir = label_dir
self.tfrecord_path = tfrecord_path
self.labels = []
self.classes = classes
self.img_pattern = img_pattern
self.label_pattern = label_pattern
self.image_feature_description = \
{
'label': tf.io.FixedLenFeature([], tf.string),
'image': tf.io.FixedLenFeature([], tf.string)
}
@staticmethod
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
@staticmethod
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
@staticmethod
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _parse_example_function(self, example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_example(example_proto, self.image_feature_description)
def image_example(self, image_string, label):
feature = {
'label': self._bytes_feature(label),
'image': self._bytes_feature(image_string)
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def return_inst_cnts(self, inst_ex):
inst_cnt = np.zeros(inst_ex.shape)
for unique_class in np.unique(inst_ex):
inst_img = (inst_ex == unique_class) / 1
cnts, _ = cv2.findContours(inst_img.astype("uint8"), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
inst_cnt = cv2.drawContours(inst_cnt, cnts, -1, (1., 1., 1.), thickness=1)
return inst_cnt
def write_tfrecords(self, training=False, dataset_name=""):
img_paths = sorted(pathlib.Path(self.image_dir).rglob(self.img_pattern))
label_paths = sorted(pathlib.Path(self.labels_dir).rglob(self.label_pattern))
with tf.io.TFRecordWriter(self.tfrecord_path) as writer:
for img_path, label_path in tqdm.tqdm(zip(img_paths, label_paths)):
img_string = open(str(img_path), 'rb').read()
label_string = open(str(label_path), 'rb').read()
tf_example = self.image_example(img_string, label_string)
writer.write(tf_example.SerializeToString())
if training:
import json
if os.path.exists('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))):
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))) as f:
data = json.load(f)
if dataset_name in list(data.keys()):
print("Dataset {} value was already present but value was updated".format(dataset_name))
else:
data = {}
data[dataset_name] = len(img_paths)
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path)), 'w') as json_file:
json.dump(data, json_file)
def decode_strings(self, record):
images = tf.io.decode_jpeg(record['image'], 3)
labels = tf.io.decode_jpeg(record['label'], 3)
return images, labels
def read_tfrecords(self):
"""
Read iam tfrecords
:return: Returns a tuple of images and their label (images, labels)
"""
raw_dataset = tf.data.TFRecordDataset(self.tfrecord_path)
parsed_dataset = raw_dataset.map(self._parse_example_function)
decoded_dataset = parsed_dataset.map(self.decode_strings)
return decoded_dataset
if __name__ == "__main__":
classes = 150
dataset_name = "ade20k1"
train = TFRecordsSeg(image_dir="/volumes2/datasets/ADEChallengeData2016/images/training",
label_dir="/volumes2/datasets/ADEChallengeData2016/annotations/training",
tfrecord_path="/data/input/datasets/tf2_segmentation_tfrecords/{}_train.tfrecords".format(dataset_name),
classes=classes, img_pattern="*.jpg",
label_pattern="*.png")
# train = TFRecordsSeg(data_dir="/data/input/datasets/cityscape_processed", tfrecord_path="/volumes1/train.tfrecords", split='train')
val = TFRecordsSeg(image_dir="/volumes2/datasets/ADEChallengeData2016/images/validation",
label_dir="/volumes2/datasets/ADEChallengeData2016/annotations/validation",
tfrecord_path="/data/input/datasets/tf2_segmentation_tfrecords/{}_val.tfrecords".format(dataset_name),
classes=classes, img_pattern="*.jpg",
label_pattern="*.png")
train.write_tfrecords(training=True, dataset_name=dataset_name)
val.write_tfrecords()
# example = train
# image_dataset = example.read_tfrecords().repeat(10)
# cv2.namedWindow("img", 0)
# cv2.namedWindow("label", 0)
# for image_features in image_dataset:
# img = image_features[0][..., ::-1]
# label = image_features[1]
# print(np.unique(label.numpy()))
# insts = image_features[2]
# cv2.imshow("img", img.numpy())
# cv2.imshow("label", label.numpy()/classes)
# cv2.waitKey()
# print(image_features[0].shape, image_features[1].shape, image_features[2].shape)
# example.write_tfrecords()
# image_dataset = example.read_tfrecords().shuffle(10000)
#
# for image_features in image_dataset.take(10):
# print(image_features[0].shape, image_features[1].numpy())
|
AhmedBadar512/Badr_AI_Repo
|
utils/create_seg_tfrecords.py
|
create_seg_tfrecords.py
|
py
| 6,714 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "tensorflow.io.FixedLenFeature",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.string",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.io.FixedLenFeature",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.string",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Feature",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.BytesList",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Feature",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.FloatList",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Feature",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.Int64List",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.io.parse_example",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.Example",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.Features",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_NONE",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tensorflow.io.TFRecordWriter",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.io.decode_jpeg",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.io.decode_jpeg",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.TFRecordDataset",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 107,
"usage_type": "attribute"
}
] |
30353219011
|
from os.path import abspath
from io import BytesIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestOptionalCollection(TestCase):
def test(self):
self.main()
def do(self):
############################################################
# Imports.
script = self.script
from mayavi.sources.vtk_file_reader import VTKFileReader
from mayavi.filters.contour import Contour
from mayavi.filters.optional import Optional
from mayavi.filters.collection import Collection
from mayavi.filters.api import PolyDataNormals
from mayavi.modules.api import Surface
############################################################
# Create a new scene and set up the visualization.
s = self.new_scene()
# Read a VTK (old style) data file.
r = VTKFileReader()
r.initialize(get_example_data('heart.vtk'))
script.add_source(r)
c = Contour()
# `name` is used for the notebook tabs.
n = PolyDataNormals(name='Normals')
o = Optional(filter=n, label_text='Compute normals')
coll = Collection(filters=[c, o], name='IsoSurface')
script.add_filter(coll)
s = Surface()
script.add_module(s)
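        # Pipeline under test: VTKFileReader -> Collection([Contour, Optional(PolyDataNormals)])
        # -> Surface. check() below asserts on the collection's output as the
        # contour values and the optional normals filter are toggled.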
########################################
# do the testing.
def check(coll):
"""Check if test status is OK given the collection."""
c, o = coll.filters
c = c.filter
n = o.filter
assert coll.get_output_dataset().point_data.scalars.range == (127.5, 127.5)
# Adding a contour should create the appropriate output in
# the collection.
c.contours.append(200)
assert coll.get_output_dataset().point_data.scalars.range == (127.5, 200.0)
# the collection's output should be that of the normals.
assert coll.get_output_dataset() is n.get_output_dataset()
# disable the optional filter and check.
o.enabled = False
assert 'disabled' in o.name
assert coll.get_output_dataset() is c.get_output_dataset()
# Set back everything to original state.
c.contours.pop()
o.enabled = True
assert coll.get_output_dataset().point_data.scalars.range == (127.5, 127.5)
assert coll.get_output_dataset() is n.get_output_dataset()
assert 'disabled' not in o.name
check(coll)
############################################################
# Test if saving a visualization and restoring it works.
# Save visualization.
f = BytesIO()
f.name = abspath('test.mv2') # We simulate a file.
script.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine = script.engine
engine.close_scene(s)
# Load visualization
script.load_visualization(f)
s = engine.current_scene
# Now do the check.
coll = s.children[0].children[0]
check(coll)
############################################################
# Test if the Mayavi2 visualization can be deep-copied.
# Pop the source object.
source = s.children.pop()
# Add it back to see if that works without error.
s.children.append(source)
# Now do the check.
coll = s.children[0].children[0]
check(coll)
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
source1 = copy.deepcopy(source)
s.children[0] = source1
# Now do the check.
coll = s.children[0].children[0]
check(coll)
# If we have come this far, we are golden!
if __name__ == "__main__":
t = TestOptionalCollection()
t.test()
|
enthought/mayavi
|
integrationtests/mayavi/test_optional_collection.py
|
test_optional_collection.py
|
py
| 4,072 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
[
{
"api_name": "common.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "mayavi.sources.vtk_file_reader.VTKFileReader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "common.get_example_data",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "mayavi.filters.contour.Contour",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "mayavi.filters.api.PolyDataNormals",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "mayavi.filters.optional.Optional",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mayavi.filters.collection.Collection",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "mayavi.modules.api.Surface",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "{'VTKFileReader': 'mayavi.sources.vtk_file_reader.VTKFileReader', 'Contour': 'mayavi.filters.contour.Contour', 'Optional': 'mayavi.filters.optional.Optional', 'Collection': 'mayavi.filters.collection.Collection', 'PolyDataNormals': 'mayavi.filters.api.PolyDataNormals', 'Surface': 'mayavi.modules.api.Surface'}",
"line_number": 115,
"usage_type": "call"
}
] |
32161722151
|
import sys
from pathlib import Path
from colorama import Fore
sys.path.append(str(Path(__file__).parent.parent))
from g4f import BaseProvider, models, Provider
logging = False
class Styles:
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def main():
providers = get_providers()
failed_providers = []
for _provider in providers:
if _provider.needs_auth:
continue
print("Provider:", _provider.__name__)
result = test(_provider)
print("Result:", result)
if _provider.working and not result:
failed_providers.append(_provider)
print()
if failed_providers:
print(f"{Fore.RED + Styles.BOLD}Failed providers:{Styles.ENDC}")
for _provider in failed_providers:
print(f"{Fore.RED}{_provider.__name__}")
else:
print(f"{Fore.GREEN + Styles.BOLD}All providers are working")
def get_providers() -> list[type[BaseProvider]]:
provider_names = dir(Provider)
ignore_names = [
"annotations",
"base_provider",
"BaseProvider",
"AsyncProvider",
"AsyncGeneratorProvider"
]
provider_names = [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
return [getattr(Provider, provider_name) for provider_name in provider_names]
def create_response(_provider: type[BaseProvider]) -> str:
if _provider.supports_gpt_35_turbo:
model = models.gpt_35_turbo.name
elif _provider.supports_gpt_4:
model = models.gpt_4.name
else:
model = models.default.name
response = _provider.create_completion(
model=model,
        messages=[{"role": "user", "content": "Hello, who are you? Answer in as much detail as possible."}],
stream=False,
)
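    # create_completion typically returns an iterable of text chunks (it may be a
    # generator even with stream=False), so joining normalises it to a single string.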
return "".join(response)
def test(_provider: type[BaseProvider]) -> bool | str:
try:
response = create_response(_provider)
assert type(response) is str
assert len(response) > 0
return response
except Exception as e:
if logging:
print(e)
return False
if __name__ == "__main__":
main()
|
dovgan-developer/discord-bot-g4f
|
testing/test_providers.py
|
test_providers.py
|
py
| 2,239 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.GREEN",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "g4f.Provider",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "g4f.Provider",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "g4f.BaseProvider",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "g4f.BaseProvider",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "g4f.models.gpt_35_turbo",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "g4f.models",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "g4f.models.gpt_4",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "g4f.models",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "g4f.models.default",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "g4f.models",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "g4f.BaseProvider",
"line_number": 71,
"usage_type": "name"
}
] |
26304099314
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.utils import timezone
from django.views import generic
from paypal.standard.forms import PayPalPaymentsForm
from django.http import HttpRequest, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .forms import CheckoutForm
from .models import ProdukItem, OrderProdukItem, Order, AlamatPengiriman, Payment
class HomeListView(generic.ListView):
template_name = 'home.html'
queryset = ProdukItem.objects.all()
paginate_by = 4
class ContactView(generic.ListView):
template_name = 'kontak.html'
queryset = ProdukItem.objects.all()
paginate_by = 4
class ProductListView(generic.ListView):
template_name = 'list_produk.html'
queryset = ProdukItem.objects.all()
paginate_by = 4
class ProductDetailView(generic.DetailView):
template_name = 'product_detail.html'
queryset = ProdukItem.objects.all()
class CheckoutView(LoginRequiredMixin, generic.FormView):
def get(self, *args, **kwargs):
form = CheckoutForm()
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if order.produk_items.count() == 0:
messages.warning(self.request, 'Belum ada belajaan yang Anda pesan, lanjutkan belanja')
return redirect('toko:home-produk-list')
except ObjectDoesNotExist:
order = {}
messages.warning(self.request, 'Belum ada belajaan yang Anda pesan, lanjutkan belanja')
return redirect('toko:home-produk-list')
context = {
'form': form,
'keranjang': order,
}
template_name = 'checkout.html'
return render(self.request, template_name, context)
def post(self, *args, **kwargs):
form = CheckoutForm(self.request.POST or None)
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if form.is_valid():
alamat_1 = form.cleaned_data.get('alamat_1')
alamat_2 = form.cleaned_data.get('alamat_2')
negara = form.cleaned_data.get('negara')
kode_pos = form.cleaned_data.get('kode_pos')
opsi_pembayaran = form.cleaned_data.get('opsi_pembayaran')
alamat_pengiriman = AlamatPengiriman(
user=self.request.user,
alamat_1=alamat_1,
alamat_2=alamat_2,
negara=negara,
kode_pos=kode_pos,
)
alamat_pengiriman.save()
order.alamat_pengiriman = alamat_pengiriman
order.save()
if opsi_pembayaran == 'P':
return redirect('toko:payment', payment_method='paypal')
else:
return redirect('toko:payment', payment_method='stripe')
messages.warning(self.request, 'Gagal checkout')
return redirect('toko:checkout')
except ObjectDoesNotExist:
messages.error(self.request, 'Tidak ada pesanan yang aktif')
return redirect('toko:order-summary')
class PaymentView(LoginRequiredMixin, generic.FormView):
def get(self, *args, **kwargs):
template_name = 'payment.html'
try:
order = Order.objects.get(user=self.request.user, ordered=False)
paypal_data = {
'business': settings.PAYPAL_RECEIVER_EMAIL,
'amount': order.get_total_harga_order,
'item_name': f'Pembayaran belajanan order: {order.id}',
'invoice': f'{order.id}-{timezone.now().timestamp()}' ,
'currency_code': 'USD',
'notify_url': self.request.build_absolute_uri(reverse('paypal-ipn')),
'return_url': self.request.build_absolute_uri(reverse('toko:paypal-return')),
'cancel_return': self.request.build_absolute_uri(reverse('toko:paypal-cancel')),
}
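            # notify_url receives PayPal's asynchronous IPN callback (django-paypal's
            # 'paypal-ipn' endpoint); return_url and cancel_return are where the
            # buyer's browser is sent after completing or aborting the payment.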
qPath = self.request.get_full_path()
isPaypal = 'paypal' in qPath
form = PayPalPaymentsForm(initial=paypal_data)
context = {
'paypalform': form,
'order': order,
'is_paypal': isPaypal,
}
return render(self.request, template_name, context)
except ObjectDoesNotExist:
return redirect('toko:checkout')
class OrderSummaryView(LoginRequiredMixin, generic.TemplateView):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'keranjang': order
}
template_name = 'order_summary.html'
return render(self.request, template_name, context)
except ObjectDoesNotExist:
messages.error(self.request, 'Tidak ada pesanan yang aktif')
return redirect('/')
def add_to_cart(request, slug):
if request.user.is_authenticated:
produk_item = get_object_or_404(ProdukItem, slug=slug)
order_produk_item, _ = OrderProdukItem.objects.get_or_create(
produk_item=produk_item,
user=request.user,
ordered=False
)
order_query = Order.objects.filter(user=request.user, ordered=False)
if order_query.exists():
order = order_query[0]
if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
order_produk_item.quantity += 1
order_produk_item.save()
pesan = f"ProdukItem sudah diupdate menjadi: { order_produk_item.quantity }"
messages.info(request, pesan)
return redirect('toko:produk-detail', slug = slug)
else:
order.produk_items.add(order_produk_item)
messages.info(request, 'ProdukItem pilihanmu sudah ditambahkan')
return redirect('toko:produk-detail', slug = slug)
else:
tanggal_order = timezone.now()
order = Order.objects.create(user=request.user, tanggal_order=tanggal_order)
order.produk_items.add(order_produk_item)
messages.info(request, 'ProdukItem pilihanmu sudah ditambahkan')
return redirect('toko:produk-detail', slug = slug)
else:
return redirect('/accounts/login')
def remove_from_cart(request, slug):
if request.user.is_authenticated:
produk_item = get_object_or_404(ProdukItem, slug=slug)
order_query = Order.objects.filter(
user=request.user, ordered=False
)
if order_query.exists():
order = order_query[0]
if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
try:
order_produk_item = OrderProdukItem.objects.filter(
produk_item=produk_item,
user=request.user,
ordered=False
)[0]
order.produk_items.remove(order_produk_item)
order_produk_item.delete()
pesan = f"ProdukItem sudah dihapus"
messages.info(request, pesan)
return redirect('toko:produk-detail',slug = slug)
except ObjectDoesNotExist:
print('Error: order ProdukItem sudah tidak ada')
else:
messages.info(request, 'ProdukItem tidak ada')
return redirect('toko:produk-detail',slug = slug)
else:
messages.info(request, 'ProdukItem tidak ada order yang aktif')
return redirect('toko:produk-detail',slug = slug)
else:
return redirect('/accounts/login')
# @csrf_exempt
def paypal_return(request):
if request.user.is_authenticated:
try:
print('paypal return', request)
order = Order.objects.get(user=request.user, ordered=False)
payment = Payment()
payment.user=request.user
payment.amount = order.get_total_harga_order()
            payment.payment_option = 'P'  # 'P' = PayPal, 'S' = Stripe
payment.charge_id = f'{order.id}-{timezone.now()}'
payment.timestamp = timezone.now()
payment.save()
order_produk_item = OrderProdukItem.objects.filter(user=request.user,ordered=False)
order_produk_item.update(ordered=True)
order.payment = payment
order.ordered = True
order.save()
messages.info(request, 'Pembayaran sudah diterima, terima kasih')
return redirect('toko:home-produk-list')
except ObjectDoesNotExist:
messages.error(request, 'Periksa kembali pesananmu')
return redirect('toko:order-summary')
else:
return redirect('/accounts/login')
# @csrf_exempt
def paypal_cancel(request):
messages.error(request, 'Pembayaran dibatalkan')
return redirect('toko:order-summary')
def filter_products(request):
filtered_products = None
selected_kategori = request.GET.getlist('kategori')
selected_tags = request.GET.getlist('tags')
if selected_kategori or selected_tags:
filtered_products = ProdukItem.objects.all()
if selected_kategori:
filtered_products = filtered_products.filter(kategori__in=selected_kategori)
if selected_tags:
filtered_products = filtered_products.filter(label__in=selected_tags)
else:
filtered_products = ProdukItem.objects.all()
return render(request, 'list_produk.html', {'object_list': filtered_products})
def pencarian_barang(request):
keyword = request.GET.get('keyword')
if keyword:
barang = ProdukItem.objects.filter(nama_produk__icontains=keyword)
else:
barang = None
return render(request, 'list_produk.html', {'object_list': barang})
def update_quantity(request: HttpRequest):
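    # Only handle AJAX POSTs: X-Requested-With is the header conventionally set by
    # jQuery-style XMLHttpRequest helpers, so plain requests fall through to the
    # final 'Invalid request' response.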
if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':
product_id = request.POST.get('product_id')
action = request.POST.get('action')
total = 0.0
hemat = 0.0
total_all = None
total_hemat = None
try:
product = OrderProdukItem.objects.get(id=product_id)
if action == 'increase':
product.quantity += 1
elif action == 'decrease':
if product.quantity > 1:
product.quantity -= 1
product.save()
if product.produk_item.harga_diskon:
total = product.get_total_harga_diskon_item()
hemat = product.get_total_hemat_item()
else :
total = product.get_total_harga_item()
return JsonResponse({'quantity': product.quantity, 'total':total, 'hemat':hemat})
except OrderProdukItem.DoesNotExist:
return JsonResponse({'error': 'Product not found'}, status=400)
return JsonResponse({'error': 'Invalid request'}, status=400)
def reduce_from_cart(request, slug):
if request.user.is_authenticated:
produk_item = get_object_or_404(ProdukItem, slug=slug)
order_produk_item, _ = OrderProdukItem.objects.get_or_create(
produk_item=produk_item,
user=request.user,
ordered=False
)
order_query = Order.objects.filter(user=request.user, ordered=False)
if order_query.exists():
order = order_query[0]
if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
if order_produk_item.quantity > 1 :
order_produk_item.quantity -= 1
order_produk_item.save()
pesan = f"ProdukItem sudah diupdate menjadi: { order_produk_item.quantity }"
messages.info(request, pesan)
else:
pesan = f"Produk Item tidak bisa di update"
messages.warning(request, pesan)
return redirect('toko:produk-detail', slug = slug)
else:
messages.info(request, 'ProdukItem pilihanmu tidak ada pada keranjang')
return redirect('toko:produk-detail', slug = slug)
else:
messages.info(request, 'ProdukItem pilihanmu tidak ada pada keranjang')
return redirect('toko:produk-detail', slug = slug)
else:
return redirect('/accounts/login')
def cari_produk(request, kategori):
produk = ProdukItem.objects.filter(kategori=kategori)
return render(request, 'list_produk.html', {'object_list': produk})
# def update_cart(request, slug):
# def get(self, *args, **kwargs):
# if request.user.is_authenticated:
# produk_item = get_object_or_404(ProdukItem, slug=slug)
# order_produk_item, _ = OrderProdukItem.objects.get_or_create(
# produk_item=produk_item,
# user=request.user,
# ordered=False
# )
# order_query = Order.objects.filter(user=request.user, ordered=False)
# if order_query.exists():
# order = order_query[0]
# if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
# order_produk_item.quantity += 1
# order_produk_item.save()
# order = Order.objects.get(user=self.request.user, ordered=False)
# context = {
# 'keranjang': order
# }
# template_name = 'order_summary.html'
# return render(self.request, template_name, context)
# else:
# return redirect('/accounts/login')
|
ifty123/ecomm_fix
|
ecomm/toko/views.py
|
views.py
|
py
| 14,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.ListView",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.ProdukItem.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "models.ProdukItem.objects.all",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.ProdukItem.objects.all",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "models.ProdukItem.objects.all",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.views.generic.FormView",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "forms.CheckoutForm",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.Order.objects.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "forms.CheckoutForm",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models.Order.objects.get",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "models.AlamatPengiriman",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.views.generic.FormView",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.get",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAYPAL_RECEIVER_EMAIL",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "paypal.standard.forms.PayPalPaymentsForm",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.get",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem",
"line_number": 133,
"usage_type": "argument"
},
{
"api_name": "models.OrderProdukItem.objects.get_or_create",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "models.OrderProdukItem.objects",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProdukItem",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.filter",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.create",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem",
"line_number": 163,
"usage_type": "argument"
},
{
"api_name": "models.Order.objects.filter",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "models.OrderProdukItem.objects.filter",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "models.OrderProdukItem.objects",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProdukItem",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "models.Order.objects.get",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "models.Payment",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "models.OrderProdukItem.objects.filter",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "models.OrderProdukItem.objects",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProdukItem",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects.all",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "models.ProdukItem.objects.all",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects.filter",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.http.HttpRequest",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "models.OrderProdukItem.objects.get",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "models.OrderProdukItem.objects",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProdukItem",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "models.OrderProdukItem.DoesNotExist",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProdukItem",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem",
"line_number": 286,
"usage_type": "argument"
},
{
"api_name": "models.OrderProdukItem.objects.get_or_create",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "models.OrderProdukItem.objects",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProdukItem",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.filter",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects.filter",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "models.ProdukItem.objects",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "models.ProdukItem",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 316,
"usage_type": "call"
}
] |
24742947009
|
from asyncirc import irc
import asyncirc.plugins.sasl
import asyncio, configparser, time, sys
config = configparser.ConfigParser(interpolation=None)
config.read('config.ini')
network = config["DEFAULT"]["network"]
server = config[network]["server"]
port = config[network]["port"]
nick = config[network]['nick']
password = config[network]['password']
conn = irc.connect(server, port, use_ssl=True)
conn.register(nick, nick, nick)
asyncirc.plugins.sasl.auth(nick, password)
nicks_to_renew = []
nick_to_try = ""
@conn.on("irc-001")
def query_for_nicks(message):
print("Querying NickServ for list of nicks")
conn.say("NickServ", "info")
@conn.on("private-notice")
def extract_nicks(message, user, target, text):
if message.source != "NickServ!NickServ@services.":
print("Notice from user {}: {}".format(user.user, text))
return
if text.startswith("Nicks"):
global nicks_to_renew
nicks = text.split(":", 1)[1].strip()
        nicks_to_renew += [n for n in nicks.split()
                           if n != nick]
print("Added `{}' to list of nicks".format(nicks))
elif "End of Info" in text:
# Run the first renew try at the end of the nickserv info
renew_next()
@conn.on("irc-nick")
def renew_next(message=""):
# Sleep 5 seconds before trying to renew a nick, due to nick changing rate limiting
time.sleep(5)
try:
global nick_to_try
nick_to_try = nicks_to_renew.pop()
except IndexError:
# Exit when we have no more nicks to renew
print("All nicks renewed. Exiting...")
conn.anything("QUIT :Done...")
sys.exit(0)
print("Trying to renew nick `{}'".format(nick_to_try))
conn.writeln("NICK {}".format(nick_to_try))
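    # A successful change emits "irc-nick", which re-enters renew_next() for the
    # next nickname; failures are handled by the irc-433/437/438 handlers below.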
@conn.on("irc-433")
def nick_in_use(message):
print("Nickname `{}' is already in use. Skipping...".format(nick_to_try))
renew_next()
@conn.on("irc-437")
def nick_unavailable(message):
print("Nick `{}' is marked temporarily unavailable, releasing it...".format(nick_to_try))
conn.say("NickServ", "RELEASE {}".format(nick_to_try))
print("Retrying renew of `{}'".format(nick_to_try))
global nicks_to_renew
nicks_to_renew.append(nick_to_try)
renew_next()
@conn.on("irc-438")
def nick_change_ratelimit(message):
global nicks_to_renew
nicks_to_renew.append(nick_to_try)
print("Nick changing was rate limited, waiting 20 seconds")
time.sleep(20)
print("Nick changing resuming")
renew_next()
if __name__ == '__main__':
asyncio.get_event_loop().run_forever()
|
kyrias/reclaimer
|
reclaimer.py
|
reclaimer.py
|
py
| 2,611 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "configparser.ConfigParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "asyncirc.irc.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "asyncirc.irc",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "asyncirc.plugins.sasl.auth",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "asyncirc.plugins",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 90,
"usage_type": "call"
}
] |
11301162272
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from datetime import datetime
from coupon.models import Coupon
from coupon.serializers import CouponSerializer
@api_view(['GET'])
def get_coupons(request):
user_id = request.GET.get('user_id')
expired = request.GET.get('expired')
page = request.GET.get('page')
limit = request.GET.get('limit')
if not user_id:
return Response({'success': False, 'message': '...'})
if not page:
page = 1
if not limit:
limit = 5
page = int(page)
limit = int(limit)
start = (page - 1) * limit
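    # Manual offset pagination: page/limit arrive as query-string values, so the
    # slices below fetch rows [start, start + limit) lazily from the queryset.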
if not expired:
        coupons = Coupon.objects.filter(user_id=user_id, expire_time__gte=datetime.now()).order_by('expire_time')[start: start + limit]
else:
coupons = Coupon.objects.filter(user_id=user_id).order_by('expire_time')[start: start + limit]
serializer = CouponSerializer(coupons, many=True)
return Response({'success': True, 'message': '成功', 'data': serializer.data})
|
jpswing/assmovie
|
coupon/views.py
|
views.py
|
py
| 1,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.response.Response",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "coupon.models.Coupon.objects.filter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "coupon.models.Coupon.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "coupon.models.Coupon",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "coupon.models.Coupon.objects.filter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "coupon.models.Coupon.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "coupon.models.Coupon",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "coupon.serializers.CouponSerializer",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 8,
"usage_type": "call"
}
] |
14069562246
|
'''
Microbial growth model for A. Niger
including inhibition dynamics based on Haldane's equation
'''
##############################################################################
mic_name = 'A. niger'
print( '\n'*2, 'Summary of params used for species ', mic_name)
# Imports
from inhibition import load_csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from lmfit import Parameters, fit_report, minimize
from inhibition import plot_inhibition_curves, haldane_3_products
from control import show_fig
from control import fit_report_toggle
#######################################################################################
# Import dataset to fit model parameters:
# Include biomass optical density and cyanide concentration over time
# Extract required variables from measured data and carry out conversion
# Load measure data
measured_data, header = load_csv( 'CETIM - A niger data 1')
print('\nRaw measured data')
print(header, measured_data)
# Extract states
states_m = measured_data[:, 1:4] # states measured
state_names = header[1:4]
print('\nRaw extracted states')
print(state_names, '\n', states_m)
# Extract times at which to evaluate the solution of the ODE system
times_m = measured_data[:, 0]
print('\nMeasurement times')
print(header[0], times_m)
# Data cleaning
times_m = times_m[3:-1] - times_m[3]
states_m = states_m[3:-1,:]
# Set initial states
innoculum_size_0 = 1e5 #1.3e8
conversion_factor_IS = 1e-8 # # grams/cell
cX_0 = innoculum_size_0 * conversion_factor_IS
print('\nInitial measured states')
initial_states = [ cX_0, 25, *states_m[0,:] ] # 5 g glycine
print(initial_states)
# Data cleaning
# for ax in range(0,1):
# states_m = np.delete( states_m, [1, 2], ax )
# times_m = np.delete( times_m, [1, 2], ax )
#######################################################################################
# Build model and define regression function
# Define model for parameter fitting
# def monod(f, t, umax, Ks, Yps, Yxs):
# X = f[0]
# S = f[1]
# P = f[2]
# u = umax * (S / (Ks + S))
# dXdt = u * X
# dSdt = -dXdt / Yxs
# dPdt = (-dSdt) * Yps
# dfdt = [dXdt, dSdt, dPdt]
# return dfdt
def monod( f, t, *args ):
'''
System of differential equations for:
1) Biomass production, x (Monod dynamics assumed)
2) Substrate consumption, s
3) Organic acid production, p
pgl -> gluconic acid
pox -> oxalic acid
pci -> citric acid
'''
# Element-wise unpacking of vectorised solution, f
x = f[0]
s = f[1]
if s <= 0:
return np.zeros(5)
else:
# Biomass production rate
dxdt = args[0]*( s / (args[1] + s) ) * x
# Substrate consumption rate
dsdt = - args[2] * dxdt # - args[3] * x
# Acid production rates
dpdt = [ - args[i] * dsdt for i in [3, 4, 5] ]
# Return ODE system
return [dxdt, dsdt, *dpdt]
# Set model params
umax = 0.18 #/h
Ks = 62.24 # #g/L
Yxs = 8.51
Yps_gluc_1 = 0.003
# Yps_gluc_2 = 0.4
Yps_oxal_1 = 0.4
# Yps_oxal_2 = 0.2
Yps_citr_1 = 0.06
# Yps_citr_2 = 0.02
params = Parameters()
params.add(name='umax', value= umax, min=0, vary=False)
params.add(name='Ks', value= Ks, min=0, vary=False)
params.add(name='Yxs', value= Yxs, min=0, vary=True)
params.add(name='Yps_gluc_1', value=Yps_gluc_1, vary=True)
# params.add(name='Yps_gluc_2', value=Yps_gluc_2, min=0, vary=True)
params.add(name='Yps_oxal_1', value=Yps_oxal_1, min=0, vary=True)
# params.add(name='Yps_oxal_2', value=Yps_oxal_2, min=0, vary=True)
params.add(name='Yps_citr_1', value=Yps_citr_1, min=0, vary=True)
# params.add(name='Yps_citr_2', value=Yps_citr_2, min=0, vary=True)
# Define regression
def regress( params ):
# Unpack params
umax = params['umax'].value
Ks = params['Ks'].value
Yxs = params['Yxs'].value
Yps_gluc_1 = params['Yps_gluc_1'].value
# Yps_gluc_2 = params['Yps_gluc_2'].value
Yps_oxal_1 = params['Yps_oxal_1'].value
# Yps_oxal_2 = params['Yps_oxal_2'].value
Yps_citr_1 = params['Yps_citr_1'].value
# Yps_citr_2 = params['Yps_citr_2'].value
args = ( umax, Ks, Yxs, Yps_gluc_1, Yps_oxal_1, Yps_citr_1 )
# Model prediction
c = odeint(monod, initial_states, times_m, args=args)
cX = c[:, 0]
# cS = c[:, 1]
cP0 = c[:, -3] # Gluconic
cP1 = c[:, -2] # Oxalic
cP2 = c[:, -1] # Citric
del c
weight = [1, 1, 10000, 10000, 10000]
# Compute error
I = ( states_m[:, 0] - cP0 )**2 + ( states_m[:, 1] - cP1 )**2 + (( states_m[:, 2] - cP2)*weight )**2
return I
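# lmfit's minimize() reduces the residual array returned by regress() to a scalar
# cost (sum of squares) when a scalar optimiser such as Nelder-Mead is selected.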
# #######################################################################################
# Fit model parameters to measured data
# Minimise
method = 'Nelder'
result = minimize(regress, params, method=method)
result.params.pretty_print()
if fit_report_toggle:
print(fit_report(result))
# Redefine fitted model params
umax = result.params['umax'].value
Ks = result.params['Ks'].value
Yxs = result.params['Yxs'].value
Yps_gluc_1 = params['Yps_gluc_1'].value
# Yps_gluc_2 = params['Yps_gluc_2'].value
Yps_oxal_1 = params['Yps_oxal_1'].value
# Yps_oxal_2 = params['Yps_oxal_2'].value
Yps_citr_1 = params['Yps_citr_1'].value
# Yps_citr_2 = params['Yps_citr_2'].value
# args = (umax, Ks, Yxs, Yps_gluc_1, Yps_gluc_2, Yps_oxal_1, Yps_oxal_2, Yps_citr_1, Yps_citr_2)
args = (umax, Ks, Yxs, Yps_gluc_1, Yps_oxal_1, Yps_citr_1)
#######################################################################################
# Plot inhibition curves
xvline = 24
times_p = sorted( np.concatenate( ([xvline], np.linspace(1e-5, 300, 400)) ) )
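# Dense time grid for the plots; xvline (24 h) is spliced in so the solver returns
# an exact sample at that time if the vertical marker is re-enabled below.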
Kis = np.array( [12.2] ) # [2, 3, 5, 10])
c_monod = odeint(monod, initial_states, times_p, args=args)
cX_no_inhib = c_monod[:,0] # Biomass concentration
cS_no_inhib = c_monod[:,1] # Substrate concentration
cP_no_inhib_1 = c_monod[:,2] # Product concentration
cP_no_inhib_2 = c_monod[:,3] # Product concentration
cP_no_inhib_3 = c_monod[:,4] # Product concentration
mic_name_1 = mic_name + ' (gluconic acid)'
mic_name_2 = mic_name + ' (oxalic acid)'
mic_name_3 = mic_name + ' (citric acid)'
# Plot biomass and sub. no inhibition curves
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name,
cX_no_inhib=cX_no_inhib,
cS_no_inhib=cS_no_inhib,
# cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
# cP_measured=states_m[:,0],
# measurement_times=times_m
)
# Plot product no inhibition curve 1
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name_1,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,0],
measurement_times=times_m,
cP_index=2
)
# Plot product no inhibition curve 2
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name_2,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_2,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,1],
measurement_times=times_m,
cP_index=3
)
# Plot product no inhibition curve 3
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name_3,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_3,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,2],
measurement_times=times_m,
cP_index=4
)
#################################################################################
# Plot biomass and sub. inhibition curves
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name,
cX_no_inhib=cX_no_inhib,
cS_no_inhib=cS_no_inhib,
# cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
# cP_measured=states_m[:,0],
# measurement_times=times_m
)
# Plot product inhibition curve 1
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name_1,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,0],
measurement_times=times_m,
cP_index=2
)
# Plot product inhibition curve 2
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name_2,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_2,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,1],
measurement_times=times_m,
cP_index=3
)
# Plot product inhibition curve 3
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name_3,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_3,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,2],
measurement_times=times_m,
cP_index=4
)
|
TheoBatik/microbial_models
|
5b_A_niger.py
|
5b_A_niger.py
|
py
| 9,887 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "inhibition.load_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "lmfit.Parameters",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.odeint",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "lmfit.minimize",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "control.fit_report_toggle",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "lmfit.fit_report",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.odeint",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 215,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 235,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 256,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 277,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 301,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 321,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 342,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "inhibition.plot_inhibition_curves",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "inhibition.haldane_3_products",
"line_number": 363,
"usage_type": "argument"
},
{
"api_name": "control.show_fig",
"line_number": 369,
"usage_type": "name"
}
] |
72033875709
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('forestfires.csv')
pd.plotting.scatter_matrix(dataset)
X = dataset.iloc[:,0:12].values
y = dataset.iloc[:,-1].values
dataset.isnull().sum()
dataset.info()
temp = pd.DataFrame(X[:,[2,3]])
temp_month = pd.get_dummies(temp[0])
temp_day = pd.get_dummies(temp[1])
del(temp)
X = np.append(X,temp_month,axis = 1)
X = np.append(X,temp_day,axis = 1)
X = np.delete(X,2,axis =1)
X = np.delete(X,2,axis =1)
del(temp_month,temp_day)
temp = pd.DataFrame(X[:,:])
from sklearn.preprocessing import StandardScaler
st = StandardScaler()
X = st.fit_transform(X)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train,y_train)
lr.score(X_test,y_test)
from sklearn.ensemble import RandomForestRegressor
ran = RandomForestRegressor(n_estimators = 5)
ran.fit(X_train,y_train)
ran.score(X_train,y_train)
#this is complete
|
Manavendrasingh/ML-code
|
forestfire.py
|
forestfire.py
|
py
| 1,103 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.plotting.scatter_matrix",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.plotting",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 45,
"usage_type": "call"
}
] |
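A side note on the forestfires preprocessing above: the month/day columns are one-hot encoded by appending dummy frames to a NumPy object array and deleting columns by position, which is easy to get wrong. A minimal sketch of the same idea done directly on the DataFrame with pd.get_dummies (the 'month', 'day' and 'area' column names are assumptions based on the usual UCI forestfires.csv layout):

import pandas as pd
from sklearn.preprocessing import StandardScaler

# Sketch only: assumes the usual UCI forestfires.csv layout with 'month', 'day' and 'area' columns.
dataset = pd.read_csv('forestfires.csv')
features = pd.get_dummies(dataset.drop(columns=['area']), columns=['month', 'day'])
X = StandardScaler().fit_transform(features)   # dummy columns get scaled too; harmless for this sketch
y = dataset['area'].values

train_test_split and the regressors from the original snippet can then be applied to X and y unchanged.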
7160469481
|
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import numpy as np
import matplotlib.pyplot as plt
def v(d, a):
return np.sqrt((d * 9.81) / np.sin(2 * np.radians(a)))
def main():
x_distance = np.arange(1, 100, 5)
x_angle = np.arange(1, 90, 1)
distance = ctrl.Antecedent(x_distance, 'distance')
angle = ctrl.Antecedent(x_angle, 'angle')
velocity = ctrl.Consequent(np.arange(0, 100, 1), 'velocity')
distance.automf(3)
angle.automf(5)
velocity.automf(5)
# poor
# mediocre
# average
# decent
# good
rules = [
ctrl.Rule(distance['poor'], velocity['poor']),
ctrl.Rule(distance['average'] & (angle['mediocre'] | angle['average'] | angle['decent']), velocity['mediocre']),
ctrl.Rule(distance['average'] & (angle['poor'] | angle['good']), velocity['average']),
ctrl.Rule(distance['good'] & (angle['mediocre'] | angle['average'] | angle['decent']), velocity['mediocre']),
ctrl.Rule(distance['good'] & (angle['poor'] | angle['good']), velocity['good']),
]
velocity_ctrl = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules=rules))
mse = 0
i = 0
preds = []
for ang in x_angle:
for dst in x_distance:
i += 1
true = v(dst, ang)
velocity_ctrl.input['distance'] = dst
velocity_ctrl.input['angle'] = ang
velocity_ctrl.compute()
preds.append(velocity_ctrl.output['velocity'])
mse += (true - velocity_ctrl.output['velocity']) ** 2
mse /= i
print(f'MSE: {mse}')
X, Y = np.meshgrid(x_distance, x_angle)
Z = v(X, Y)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title('Prawdziwa funkcja mocu rzutu')
ax.set_xlabel('dystans')
ax.set_ylabel('kat')
ax.set_zlabel('moc rzutu')
Z = np.array(preds).reshape(Z.shape)
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title('Predykcja funkcji mocu rzutu')
ax.set_xlabel('dystans')
ax.set_ylabel('kat')
ax.set_zlabel('moc rzutu')
plt.show()
if __name__ == '__main__':
main()
|
DonChaka/PSI
|
Fuzzy/fuzzy_easy.py
|
fuzzy_easy.py
|
py
| 2,354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.sqrt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control.Antecedent",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.Antecedent",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.Consequent",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control.Rule",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.Rule",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.Rule",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.Rule",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.Rule",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.ControlSystemSimulation",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "skfuzzy.control",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "skfuzzy.control.ControlSystem",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
}
] |
26042467106
|
from __future__ import annotations
import logging
from abc import ABCMeta
from dataclasses import dataclass
from pants.core.util_rules.environments import EnvironmentNameRequest
from pants.engine.environment import EnvironmentName
from pants.engine.fs import MergeDigests, Snapshot, Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.target import (
FieldSet,
NoApplicableTargetsBehavior,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
)
from pants.engine.unions import UnionMembership, union
logger = logging.getLogger(__name__)
@union
class GenerateSnapshotsFieldSet(FieldSet, metaclass=ABCMeta):
"""The fields necessary to generate snapshots from a target."""
@dataclass(frozen=True)
class GenerateSnapshotsResult:
snapshot: Snapshot
@dataclass(frozen=True)
class EnvironmentAwareGenerateSnapshotsRequest:
"""Request class to request a `GenerateSnapshotsResult` in an environment-aware fashion."""
field_set: GenerateSnapshotsFieldSet
@rule
async def environment_await_generate_snapshots(
request: EnvironmentAwareGenerateSnapshotsRequest,
) -> GenerateSnapshotsResult:
environment_name = await Get(
EnvironmentName,
EnvironmentNameRequest,
EnvironmentNameRequest.from_field_set(request.field_set),
)
result = await Get(
GenerateSnapshotsResult,
{request.field_set: GenerateSnapshotsFieldSet, environment_name: EnvironmentName},
)
return result
class GenerateSnapshotsSubsystem(GoalSubsystem):
name = "generate-snapshots"
help = "Generate test snapshots."
@classmethod
def activated(cls, union_membership: UnionMembership) -> bool:
return GenerateSnapshotsFieldSet in union_membership
class GenerateSnapshots(Goal):
subsystem_cls = GenerateSnapshotsSubsystem
environment_behavior = Goal.EnvironmentBehavior.USES_ENVIRONMENTS
@goal_rule
async def generate_snapshots(workspace: Workspace) -> GenerateSnapshots:
target_roots_to_field_sets = await Get(
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest(
GenerateSnapshotsFieldSet,
goal_description=f"the `{GenerateSnapshotsSubsystem.name}` goal",
no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
),
)
if not target_roots_to_field_sets.field_sets:
return GenerateSnapshots(exit_code=0)
snapshot_results = await MultiGet(
Get(GenerateSnapshotsResult, EnvironmentAwareGenerateSnapshotsRequest(field_set))
for field_set in target_roots_to_field_sets.field_sets
)
all_snapshots = await Get(
Snapshot, MergeDigests([result.snapshot.digest for result in snapshot_results])
)
workspace.write_digest(all_snapshots.digest)
for file in all_snapshots.files:
logger.info(f"Generated {file}")
return GenerateSnapshots(exit_code=0)
def rules():
return collect_rules()
|
pantsbuild/pants
|
src/python/pants/core/goals/generate_snapshots.py
|
generate_snapshots.py
|
py
| 3,031 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.FieldSet",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "abc.ABCMeta",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pants.engine.unions.union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pants.engine.environment.EnvironmentName",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.environments.EnvironmentNameRequest",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.environments.EnvironmentNameRequest.from_field_set",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.environments.EnvironmentNameRequest",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pants.engine.environment.EnvironmentName",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pants.engine.goal.GoalSubsystem",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "pants.engine.unions.UnionMembership",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "pants.engine.goal.Goal",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "pants.engine.goal.Goal.EnvironmentBehavior",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pants.engine.goal.Goal",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Workspace",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.TargetRootsToFieldSets",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "pants.engine.target.TargetRootsToFieldSetsRequest",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.NoApplicableTargetsBehavior.error",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pants.engine.target.NoApplicableTargetsBehavior",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.MultiGet",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.goal_rule",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.collect_rules",
"line_number": 99,
"usage_type": "call"
}
] |
8963786234
|
#!/usr/bin/env python3
import multiprocessing
from queue import Empty
import subprocess
import Robocode
import os, os.path
from datetime import datetime
import sys
import time
# This class knows about Robocode and the Database.
def recommendedWorkers():
cpus = multiprocessing.cpu_count()
if cpus > 12:
return cpus-2
elif cpus > 6:
return cpus-1
else:
return cpus
def BattleWorker( robocode, battledb, job_q, result_q ):
print('[{who}] Started:\n {db}\n {robo}'.format(
who = multiprocessing.current_process().name,
db = battledb,
robo = robocode
), file=sys.stderr)
try:
while True:
battle = job_q.get()
if battle.__class__ != Robocode.Battle:
# sentinel: no more jobs
print('[{0}] EndOfWork!'.format(
multiprocessing.current_process().name,
), file=sys.stderr)
break
start_time = datetime.now()
try:
battledb.MarkBattleRunning(battle.id)
print('[{who}] Running battle {id} between: {comps}'.format(
who = multiprocessing.current_process().name,
id = battle.id,
comps = ' '.join(battle.competitors),
), file=sys.stderr)
battle.run()
print('[{who}] Finished: {id}'.format(
who = multiprocessing.current_process().name,
id = battle.id,
), file=sys.stderr)
except subprocess.CalledProcessError as e:
print('[{who}] Battle invocation fails: {exc}\n{output}'.format(
who = multiprocessing.current_process().name,
exc = e.cmd,
output = e.output,
), file=sys.stderr)
if not battle.error:
# Only record the data if the battle succeeded.
battledb.BattleCompleted(battle.id,
battle.dbData(),
battle.result.dbData())
elapsed = datetime.now() - start_time
result_q.put(battle.id)
except Exception as e:
print('[{who}] Exception: {exc}'.format(
who = multiprocessing.current_process().name,
exc = e,
), file=sys.stderr)
raise e
print('[{0}] Finished!'.format(
multiprocessing.current_process().name,
), file=sys.stderr)
class BattleRunner:
def __init__( self, battledb, robocode, maxWorkers=None ):
self.battledb = battledb
self.robocode = robocode
self.job_q = multiprocessing.JoinableQueue()
self.result_q = multiprocessing.JoinableQueue()
self.workers = maxWorkers if maxWorkers is not None else recommendedWorkers()
self.job_count = 0
def start( self ):
# Start the workers.
self.pool = [ multiprocessing.Process( target = BattleWorker,
args=(self.robocode, self.battledb,
self.job_q, self.result_q) )
for i in range(self.workers) ]
for p in self.pool:
p.start()
def finish( self ):
print('[{0}] Sending EndOfWork signals'.format(
multiprocessing.current_process().name,
), file=sys.stderr)
for p in self.pool:
self.job_q.put(0)
# Consume everything in the result_q
while self.job_count > 0:
battleid = self.result_q.get()
self.job_count -= 1
for p in self.pool:
p.join()
def submit( self, battle ):
print('[{0}] Submitting battle #{1} '.format(
multiprocessing.current_process().name,
battle.id,
), file=sys.stderr)
self.job_q.put(battle)
self.job_count += 1
def running(self):
'''
check to see if any of the workers are still running
'''
for p in self.pool:
if p.is_alive():
return True
return False
def getResults(self):
'''
check to see if there are any results
'''
results = []
try:
results.append(self.result_q.get_nowait())
except Empty:
pass
return results
|
mojomojomojo/di-arena
|
lib/BattleRunner.py
|
BattleRunner.py
|
py
| 4,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "multiprocessing.cpu_count",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "Robocode.Battle",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.JoinableQueue",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "multiprocessing.JoinableQueue",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "queue.Empty",
"line_number": 146,
"usage_type": "name"
}
] |
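The BattleRunner above shuts its workers down by pushing one non-Battle object (0) per process and having each worker break when the dequeued item is not a Robocode.Battle. A hedged, self-contained sketch of the same sentinel pattern, using None as the marker and a plain Queue with placeholder work:

import multiprocessing

def worker(job_q, result_q):
    # Pull jobs until the None sentinel arrives, then exit.
    while True:
        job = job_q.get()
        if job is None:
            break
        result_q.put(job * job)  # placeholder for real work

if __name__ == '__main__':
    job_q, result_q = multiprocessing.Queue(), multiprocessing.Queue()
    workers = [multiprocessing.Process(target=worker, args=(job_q, result_q)) for _ in range(2)]
    for w in workers:
        w.start()
    jobs = list(range(10))
    for job in jobs:
        job_q.put(job)
    for _ in workers:        # one sentinel per worker, mirroring BattleRunner.finish()
        job_q.put(None)
    results = [result_q.get() for _ in jobs]   # drain results before joining
    for w in workers:
        w.join()
    print(sorted(results))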
29852066628
|
__author__ = "Rohit N Dubey"
from django.conf.urls import patterns, include, url
from django.contrib import admin
from views import Ignite
from . import prod
urlpatterns = patterns('',
url(r'^ui/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': prod.UI_ROOT, }),
url(r'^api/pool/', include('pool.urls')),
url(r'^api/discoveryrule/', include('discoveryrule.urls')),
url(r'^api/configuration/', include('configuration.urls')),
# url(r'^api/usermanagement/', include('usermanagement.urls')),
url(r'^api/fabric/', include('fabric.urls')),
url(r'^api/resource/', include('resource.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^auth/', include('djoser.urls')),
url(r'^api/ignite', Ignite.as_view(), name='home'),
)
|
salran40/POAP
|
ignite/urls.py
|
urls.py
|
py
| 805 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.Ignite.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.Ignite",
"line_number": 22,
"usage_type": "name"
}
] |
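The URLconf above uses django.conf.urls.patterns() and string view references, which were deprecated in Django 1.8 and removed in 1.10. A sketch of the equivalent list-based URLconf for a newer Django, assuming the included app URLconfs are unchanged (only a few routes shown; the rest follow the same include() pattern):

# Sketch only: list-based URLconf for Django 2.x+.
from django.contrib import admin
from django.urls import include, re_path

from views import Ignite

urlpatterns = [
    re_path(r'^api/pool/', include('pool.urls')),
    re_path(r'^auth/', include('djoser.urls')),
    re_path(r'^admin/', admin.site.urls),
    re_path(r'^api/ignite', Ignite.as_view(), name='home'),
    # ...remaining api/* routes follow the same include() pattern.
]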
11499299532
|
import requests,json
def ranking(duration="daily",ranking_type="break",offset=0,lim=20,unit=False):
try:
        resp = requests.get(f'https://w4.minecraftserver.jp/api/ranking?type={ranking_type}&offset={offset}&lim={lim}&duration={duration}')
data_json = json.loads(resp.text)
rank_list = list(data_json["ranks"])
        rank, msg = 1, ""  # msg must be initialized here; it was previously referenced before assignment
for mcid_data in rank_list:
get_mcid = mcid_data["player"]
get_data = mcid_data["data"]
seichi_ryo = get_data["raw_data"]
name = get_mcid["name"]
if unit == True:
if len(str(seichi_ryo)) > 8:
seichi_ryo_kugiri0 = str(seichi_ryo)[-4:]
seichi_ryo_kugiri1 = str(seichi_ryo)[-8:-4]
seichi_ryo_kugiri2 = str(seichi_ryo)[:-8]
seichi_ryo = f"{seichi_ryo_kugiri2}億{seichi_ryo_kugiri1}万{seichi_ryo_kugiri0}"
elif len(str(seichi_ryo)) > 4:
seichi_ryo_kugiri0 = str(seichi_ryo)[-4:]
seichi_ryo_kugiri1 = str(seichi_ryo)[:-4]
seichi_ryo = seichi_ryo_kugiri1 + "万" + seichi_ryo_kugiri0
msg += f"{rank}位 {name} 整地量:{seichi_ryo}\n"
rank += 1
return msg
except:
text = "引数が無効または整地鯖APIが死んでます"
return text
def get_data(mcid=None,uuid=None,data_type="break",type_data_type="data"):
try:
if mcid != None:
resp = requests.get(f'https://api.mojang.com/users/profiles/minecraft/{mcid}')
data_json = json.loads(resp.text)
uuid_before = data_json["id"]
uuid = uuid_before[0:8]
uuid += "-"
uuid += uuid_before[8:12]
uuid += "-"
uuid += uuid_before[12:16]
uuid += "-"
uuid += uuid_before[16:20]
uuid += "-"
uuid += uuid_before[20:32]
print(uuid)
print(f'https://w4.minecraftserver.jp/api/ranking/player/{uuid}?types={data_type}')
resp = requests.get(f'https://w4.minecraftserver.jp/api/ranking/player/{uuid}?types={data_type}')
data_json = json.loads(resp.text)
if type_data_type == "data":
data = data_json[0]["data"]["raw_data"]
return data
if type_data_type == "lastquit":
return data_json[0]["lastquit"]
elif uuid != None:
resp = requests.get(f'https://w4.minecraftserver.jp/api/ranking/player/{uuid}?types={data_type}')
data_json = json.loads(resp.text)
if type_data_type == "data":
return data_json[0]["data"]["raw_data"]
if type_data_type == "lastquit":
return data_json[0]["lastquit"]
except:
text = "引数が無効または整地鯖APIが死んでます"
return text
#Required libraries
#json (standard library, no install needed)
#requests
#Install command
#py -m pip install requests
#My discord server
#https://discord.gg/Gs7VXE
#My discord account
#neruhito#6113
#672910471279673358
|
nekorobi-0/seichi_ranking
|
seichi_ranking.py
|
seichi_ranking.py
|
py
| 3,146 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 55,
"usage_type": "call"
}
] |
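In get_data() above the dashed UUID is rebuilt from Mojang's undashed id by hand-slicing five segments. A minimal sketch of the same conversion using the standard uuid module (same Mojang endpoint and 'id' field as in the code above):

import uuid
import requests

def dashed_uuid(mcid):
    # Sketch only: Mojang returns a 32-character undashed hex id; uuid.UUID restores the dashes.
    resp = requests.get(f'https://api.mojang.com/users/profiles/minecraft/{mcid}')
    return str(uuid.UUID(hex=resp.json()["id"]))

The dashed string can then be passed to the w4.minecraftserver.jp ranking endpoint exactly as in get_data().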
73100458107
|
# Network Traffic Analyzer:
# Analyze network packet captures for anomalies and threats.
# pip install pyshark
'''
Python script that reads a Wireshark PCAP file and performs basic security analysis,
such as identifying suspicious traffic, detecting port scans, and checking for potential security threats.
The script uses the pyshark library to parse the PCAP file.
'''
import pyshark
def analyze_pcap(pcap_file):
# Create a PyShark capture object
capture = pyshark.FileCapture(pcap_file)
# Initialize variables for analysis
suspicious_traffic = 0
port_scan_detected = False
# Loop through each packet in the capture file
for packet in capture:
# Check for potential port scanning
if "TCP" in packet and int(packet["TCP"].dstport) < 1024:
port_scan_detected = True
# Add more checks for specific threats or anomalies as needed
# Analyze the results
if port_scan_detected:
print("Port scan detected in the network traffic.")
else:
print("No port scan detected.")
if suspicious_traffic > 0:
print(f"Detected {suspicious_traffic} suspicious packets in the network traffic.")
else:
print("No suspicious traffic detected.")
if __name__ == "__main__":
# Replace 'your_capture.pcap' with the path to your PCAP file
pcap_file_path = 'your_capture.pcap'
analyze_pcap(pcap_file_path)
|
Cnawel/greyhat-python
|
wireshark/traffice_analyzer.py
|
traffice_analyzer.py
|
py
| 1,415 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyshark.FileCapture",
"line_number": 16,
"usage_type": "call"
}
] |
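The analyzer above marks a port scan whenever any TCP packet targets a port below 1024, which fires on ordinary web traffic. A hedged sketch of a slightly stronger heuristic, counting distinct destination ports per source address and flagging sources above a threshold (the threshold of 100 is an arbitrary assumption):

from collections import defaultdict
import pyshark

def detect_port_scans(pcap_file, port_threshold=100):
    # Count the distinct TCP destination ports contacted by each source address.
    ports_by_src = defaultdict(set)
    capture = pyshark.FileCapture(pcap_file)
    for packet in capture:
        if 'TCP' in packet and 'IP' in packet:
            ports_by_src[packet.ip.src].add(packet.tcp.dstport)
    capture.close()
    # Sources touching an unusually wide spread of ports are flagged as possible scanners.
    return {src: len(ports) for src, ports in ports_by_src.items() if len(ports) >= port_threshold}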
655282827
|
import argparse
import os
import torch
import torch_em
from torch_em.model import AnisotropicUNet
ROOT = '/scratch/pape/mito_em/data'
def get_loader(datasets, patch_shape,
batch_size=1, n_samples=None,
roi=None):
paths = [
os.path.join(ROOT, f'{ds}.n5') for ds in datasets
]
raw_key = 'raw'
label_key = 'labels'
sampler = torch_em.data.MinForegroundSampler(min_fraction=0.05, p_reject=.75)
label_transform = torch_em.transform.label.connected_components
return torch_em.default_segmentation_loader(
paths, raw_key,
paths, label_key,
batch_size=batch_size,
patch_shape=patch_shape,
label_transform=label_transform,
sampler=sampler,
n_samples=n_samples,
num_workers=8*batch_size,
shuffle=True,
label_dtype=torch.int64
)
def get_model(large_model):
n_out = 12
if large_model:
print("Using large model")
model = AnisotropicUNet(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=128,
gain=2,
final_activation=None
)
else:
print("Using vanilla model")
model = AnisotropicUNet(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=64,
gain=2,
final_activation=None
)
return model
def train_embeddings(args, datasets):
large_model = bool(args.large_model)
model = get_model(large_model)
# patch shapes:
if large_model:
# largest possible shape for A100 with mixed training and large model
# patch_shape = [32, 320, 320]
patch_shape = [32, 256, 256]
else:
# largest possible shape for 2080Ti with mixed training
patch_shape = [24, 192, 192]
train_sets = [f'{ds}_train' for ds in datasets]
val_sets = [f'{ds}_val' for ds in datasets]
if args.train_on_val:
train_sets += val_sets
train_loader = get_loader(
datasets=train_sets,
patch_shape=patch_shape,
n_samples=1000
)
val_loader = get_loader(
datasets=val_sets,
patch_shape=patch_shape,
n_samples=100
)
loss = torch_em.loss.ContrastiveLoss(
delta_var=.75,
delta_dist=2.,
impl='scatter'
)
tag = 'large' if large_model else 'default'
if args.train_on_val:
tag += '_train_on_val'
name = f"embedding_model_{tag}_{'_'.join(datasets)}"
trainer = torch_em.default_segmentation_trainer(
name=name,
model=model,
train_loader=train_loader,
val_loader=val_loader,
loss=loss,
metric=loss,
learning_rate=5e-5,
mixed_precision=True,
log_image_interval=50
)
if args.from_checkpoint:
trainer.fit(args.iterations, 'latest')
else:
trainer.fit(args.iterations)
def check(datasets, train=True, val=True, n_images=5):
from torch_em.util.debug import check_loader
patch_shape = [32, 256, 256]
if train:
print("Check train loader")
dsets = [f'{ds}_train' for ds in datasets]
loader = get_loader(dsets, patch_shape)
check_loader(loader, n_images)
if val:
print("Check val loader")
dsets = [f'{ds}_val' for ds in datasets]
loader = get_loader(dsets, patch_shape)
check_loader(loader, n_images)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--datasets', '-d', type=str, nargs='+', default=['human', 'rat'])
parser.add_argument('--check', '-c', type=int, default=0)
parser.add_argument('--iterations', '-i', type=int, default=int(1e5))
parser.add_argument('--large_model', '-l', type=int, default=0)
parser.add_argument('--from_checkpoint', type=int, default=0)
parser.add_argument('--train_on_val', type=int, default=0)
dataset_names = ['human', 'rat']
args = parser.parse_args()
datasets = args.datasets
datasets.sort()
assert all(ds in dataset_names for ds in datasets)
if args.check:
check(datasets, train=True, val=True)
else:
train_embeddings(args, datasets)
|
constantinpape/torch-em
|
experiments/unet-segmentation/mitochondria-segmentation/mito-em/challenge/embeddings/train_embeddings.py
|
train_embeddings.py
|
py
| 4,556 |
python
|
en
|
code
| 42 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch_em.data.MinForegroundSampler",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch_em.data",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch_em.transform",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch_em.default_segmentation_loader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "torch_em.model.AnisotropicUNet",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch_em.model.AnisotropicUNet",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch_em.loss.ContrastiveLoss",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch_em.loss",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "torch_em.default_segmentation_trainer",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch_em.util.debug.check_loader",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch_em.util.debug.check_loader",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 148,
"usage_type": "call"
}
] |
41858795618
|
#############################################################################################
# A statistical survey was carried out in five Brazilian cities to collect data on traffic #
# accidents. The following data were obtained:                                              #
# a) City code;                                                                             #
# b) Number of passenger vehicles (in 1999);                                                #
# c) Number of traffic accidents with victims (in 1999).                                    #
# We want to know:                                                                          #
# d) The highest and lowest accident counts and which city each belongs to;                 #
# e) The average number of vehicles across the five cities combined;                        #
#############################################################################################
from datetime import date
maior = código_maior = menor = código_menor = carros = acidentes_2000 = média_acidentes = 0
nc_2000 = 1
for c in range(1, 6):
print('-' * 60)
    # Ask for the city code
código = int(input(f'Código da {c}ª cidade: '))
    # Ask for the number of passenger vehicles
veículos = int(input(f'Número de veículos de passeio (em {date.today().year - 1}): '))
    # Ask for the number of traffic accidents with victims
acidentes = int(input(f'Número de acidentes de trânsito com vítimas (em {date.today().year - 1}): '))
    # Track the highest and lowest accident counts and the city each belongs to
if acidentes > maior:
maior = acidentes
código_maior = código
if código_menor == 0:
menor = acidentes
código_menor = código
if acidentes < menor:
menor = acidentes
código_menor = código
    # Accumulate the vehicle total to compute the average across the cities
carros += veículos
média_veículos = carros / c
print('-' * 60)
print(f"""O maior indíce de acidentes foi {maior} na cidade de código {código_maior}
O menor indíce de acidentes foi {menor} na cidade de código {código_menor}
A média de veículos nas {c} cidades foi {média_veículos}""")
|
nralex/Python
|
3-EstruturaDeRepeticao/exercício40.py
|
exercício40.py
|
py
| 2,234 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.date.today",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 21,
"usage_type": "name"
}
] |
1633248512
|
from builtins import next
from builtins import range
import os
import datetime
from xml.sax.saxutils import quoteattr
import sys
import logging
import random
import glob
from itertools import cycle
from flask import Blueprint, url_for, Response, stream_with_context, send_file, \
jsonify
from werkzeug.datastructures import Headers
from werkzeug.security import safe_join
from opendiamond.dataretriever.util import read_file_list, write_data
BASEURL = 'augment'
STYLE = False
LOCAL_OBJ_URI = True # if true, return local file path, otherwise http.
INDEXDIR = DATAROOT = None
ITEMS_PER_ITERATION = int(1e4)
KEYWORD = 'yellowthroat'
"""
Example url:
/augment/root/<ROOT_DIR>/distributed/<id>of<N>/ \
keywords/<d/r ([d]eterminant/[r]andom)>_<random_seed>_<base_rate>
/augment/root/STREAM/distributed/1of2/keywords/d_42_1.0
"""
def init(config):
global INDEXDIR, DATAROOT # pylint: disable=global-statement
INDEXDIR = 'STREAM'
DATAROOT = config.dataroot
scope_blueprint = Blueprint('augment_store', __name__)
_log = logging.getLogger(__name__)
@scope_blueprint.route('/root/<rootdir>/distributed/<int:index>of<int:total>' +
'/keywords/<params>')
@scope_blueprint.route('/root/<rootdir>/keywords/<params>')
@scope_blueprint.route('/root/<rootdir>/distributed/<int:index>of<int:total>' +
'/keywords/<params>/start/<int:start>/limit/<int:limit>')
@scope_blueprint.route('/root/<rootdir>/keywords/<params>' +
'/start/<int:start>/limit/<int:limit>')
def get_scope(rootdir, index=0, total=1, params=None, start=0, limit=sys.maxsize):
global KEYWORD
if rootdir == "0":
rootdir = INDEXDIR
rootdir = _get_obj_absolute_path(rootdir)
seed = None
percentage = 0.
seed, percentage = decode_params(params)
# Assuming the same positive list is present in all the servers
# Always create a new index file
base_list, KEYWORD = create_index(rootdir, percentage, seed, index, total)
total_entries = len(base_list)
start = start if start > 0 else 0
end = min(total_entries, start + limit) if limit > 0 else total_entries
base_list = base_list[start:end]
total_entries = end - start
def generate():
yield '<?xml version="1.0" encoding="UTF-8" ?>\n'
if STYLE:
yield '<?xml-stylesheet type="text/xsl" href="/scopelist.xsl" ?>\n'
yield '<objectlist count="{:d}">\n'.format(total_entries)
for path in base_list:
path = path.strip()
yield _get_object_element(object_path=path) + '\n'
yield '</objectlist>\n'
headers = Headers([('Content-Type', 'text/xml')])
return Response(stream_with_context(generate()),
status="200 OK",
headers=headers)
def decode_params(params):
"""
    Decodes the params which are '_' separated
<[d]eterminant/[r]andom>_<random_seed>_<baserate>
"""
keywords = params.split('_')
mix_type = keywords[0]
seed = None
if len(keywords) > 1:
seed = int(keywords[1])
if mix_type == 'r' or seed is None:
seed = random.randrange(10000)
percentage = 0.1 # default base_rate = 0.1%
if len(keywords) > 2:
percentage = float(keywords[2])
return seed, round(percentage, 4)
@scope_blueprint.route('/id/<path:object_path>')
def get_object_id(object_path):
headers = Headers([('Content-Type', 'text/xml')])
return Response(_get_object_element(object_path=object_path),
"200 OK",
headers=headers)
def _get_object_element(object_path):
path = _get_obj_absolute_path(object_path)
meta = {'_gt_label': KEYWORD}
if KEYWORD in path:
return '<object id={} src={} meta={} />' \
.format(quoteattr(url_for('.get_object_id', object_path=object_path)),
quoteattr(_get_object_src_uri(object_path)),
quoteattr(url_for('.get_object_meta', present=True)))
return '<object id={} src={} />' \
.format(quoteattr(url_for('.get_object_id', object_path=object_path)),
quoteattr(_get_object_src_uri(object_path)))
@scope_blueprint.route('/meta/<path:present>')
def get_object_meta(present=False):
attrs = dict()
if present:
attrs['_gt_label'] = KEYWORD
return jsonify(attrs)
def _get_object_src_uri(object_path):
if LOCAL_OBJ_URI:
return 'file://' + _get_obj_absolute_path(object_path)
return url_for('.get_object_src_http', obj_path=object_path)
def _get_obj_absolute_path(obj_path):
return safe_join(DATAROOT, obj_path)
@scope_blueprint.route('/obj/<path:obj_path>')
def get_object_src_http(obj_path):
path = _get_obj_absolute_path(obj_path)
headers = Headers()
# With add_etags=True, conditional=True
# Flask should be smart enough to do 304 Not Modified
response = send_file(path,
cache_timeout=datetime.timedelta(
days=365).total_seconds(),
add_etags=True,
conditional=True)
response.headers.extend(headers)
return response
def create_index(base_dir, base_rate=0.05, seed=42, rank=0, total_servers=1):
"""
Creates Index List File:
    Assumes file lists named NEGATIVE (e.g. a YFCC subset) and POSITIVE exist in the base directory.
"""
filepath_split = ['STREAM', "{:.2f}".format(base_rate), str(rank), str(total_servers), str(seed)]
filepath = '_'.join(filepath_split)
filepath = os.path.join(base_dir, filepath)
positive_path = os.path.join(base_dir, 'POSITIVE')
negative_path = os.path.join(base_dir, 'NEGATIVE')
positive_firstline = open(positive_path).readline().rstrip()
keyword = positive_firstline.split('/')[-2] # Assuming all positives are in the same parent dir
_log.info("Dir {} BR: {} Seed:{} FP{}".format(base_dir, base_rate, seed, filepath))
sys.stdout.flush()
if not os.path.exists(filepath):
positive_data = read_file_list(positive_path) # same across servers
negative_data = read_file_list(negative_path) # different across servers
random.Random(seed).shuffle(positive_data)
random.Random(seed).shuffle(negative_data)
len_positive = len(positive_data)
start_idx = int(rank * (1.0 / total_servers) * len_positive)
end_idx = int((rank+1) * (1.0 / total_servers) * len_positive)
positive_data = positive_data[start_idx:end_idx]
len_positive = len(positive_data)
negative_sample = int(len_positive * (100./base_rate -1))
negative_data = negative_data[:negative_sample]
return write_data(filepath, [negative_data, positive_data], seed), keyword
return read_file_list(filepath), keyword
|
cmusatyalab/opendiamond
|
opendiamond/dataretriever/augment_store.py
|
augment_store.py
|
py
| 6,831 |
python
|
en
|
code
| 19 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "werkzeug.datastructures.Headers",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "flask.stream_with_context",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "werkzeug.datastructures.Headers",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "xml.sax.saxutils.quoteattr",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "xml.sax.saxutils.quoteattr",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "xml.sax.saxutils.quoteattr",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "xml.sax.saxutils.quoteattr",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "xml.sax.saxutils.quoteattr",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "werkzeug.security.safe_join",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "werkzeug.datastructures.Headers",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "flask.send_file",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "opendiamond.dataretriever.util.read_file_list",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "opendiamond.dataretriever.util.read_file_list",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "random.Random",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "random.Random",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "opendiamond.dataretriever.util.write_data",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "opendiamond.dataretriever.util.read_file_list",
"line_number": 192,
"usage_type": "call"
}
] |
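A quick worked example of the base-rate arithmetic in create_index() above: negative_sample = len_positive * (100/base_rate - 1), so positives end up as exactly base_rate percent of the emitted list. With illustrative numbers:

# Illustrative numbers only, mirroring the formula used in create_index().
len_positive = 50
base_rate = 0.1                                                  # percent of positives in the final stream
negative_sample = int(len_positive * (100. / base_rate - 1))     # 50 * 999 = 49950 negatives
positive_fraction = len_positive / (len_positive + negative_sample)
print(positive_fraction)                                         # 0.001, i.e. 0.1%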
5345020806
|
import email.utils
import json
import os
import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pathlib import Path
import jinja2
from dotenv import load_dotenv
send_user = ""
load_dotenv()
class SendEmailController:
def __init__(self):
pass
@staticmethod
def render_mail_template(template_params, template_name):
html_template_url = Path(__file__).parents[1] / "mail_templates"
html_template_loader = jinja2.FileSystemLoader(html_template_url)
html_template = jinja2.Environment(loader=html_template_loader)
email_template = html_template.get_template(template_name)
compose_email_html = email_template.render(template_params)
return compose_email_html
@staticmethod
def config_send_mail(subject, receive_email, compose_email_html):
sender_email = os.getenv("SENDER_EMAIL")
sender_name = os.getenv("SENDER_NAME")
smtp_server = os.getenv("SMTP_SERVER")
smtp_port = os.getenv("SMTP_PORT")
password = os.getenv("MAIL_PASSWORD")
list_email_cc = []
msg = MIMEMultipart("mixed")
msg["Subject"] = subject
msg["From"] = email.utils.formataddr((sender_name, sender_email))
if receive_email.upper() == "Undetermined".upper():
msg["To"] = sender_email
else:
msg["To"] = receive_email
msg["Cc"] = ", ".join(list_email_cc)
msg.attach(MIMEText(compose_email_html, "html"))
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, int(smtp_port)) as smtp:
smtp.starttls(context=context)
smtp.login(sender_email, password)
smtp.send_message(msg)
smtp.quit()
@staticmethod
def send_email(receive_email, subject, template_params, template_file_name):
# subject, template_mail = SendEmailController.build_template(template_params)
# subject = "send email test"
# template_mail = {"text": "aloha"}
template_mail = template_params
compose_email_html = SendEmailController.render_mail_template(
template_mail, template_file_name
)
if subject and template_mail:
SendEmailController.config_send_mail(
subject, receive_email, compose_email_html
)
@staticmethod
def build_template(template_params):
data = json.dumps(template_params)
data = json.loads(data)
id = data.get("id")
time = data.get("time")
# email_to = data.get("email_to")
source_ip = data.get("source_ip", "")
destination = data.get("destination")
flow_count = data.get("flow_count", -1)
tenant = data.get("tenant")
vpc = data.get("vpc")
body_data = ""
subject = "[Violation]"
if id == 1:
category = "Policy violation"
subject = subject + " " + category
body_data = {
"category": category,
"time": time,
"source_ip": source_ip,
"destination": destination,
"tenant": tenant,
"vpc": vpc,
}
elif id == 2:
category = "DDoS Attack"
subject = subject + " " + category
body_data = {
"category": category,
"time": time,
"destination": destination,
"flow_count": flow_count,
"tenant": tenant,
"vpc": vpc,
}
elif id == 3:
category = "Possible Attack"
subject = subject + " " + category
body_data = {
"category": category,
"time": time,
"destination": destination,
"tenant": tenant,
"vpc": vpc,
}
return subject, body_data
|
nguyendoantung/e-maintenance-system
|
back-end/service/utils/email/EmailController.py
|
EmailController.py
|
py
| 3,978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "email.utils.utils.formataddr",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "email.utils.utils",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "email.utils",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ssl.create_default_context",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 75,
"usage_type": "call"
}
] |
9777903968
|
import math
from django.db import models
from django.db.models.signals import pre_save, post_save
from apps.addresses.models import Address
from apps.carts.models import Cart
from apps.billing.models import BillingProfile
from main.utils import unique_order_id_generator
# ORDER STATUS OPTIONS
ORDER_STATUS_CHOICES = (
# (stored value, Displayed value) #
('created', 'Created'),
('paid', 'Paid'),
('shipped', 'Shipped'),
('delivered', 'Delivered'),
('refunded', 'Refunded'),
)
class OrderManager(models.Manager):
def new_or_get(self, billing_profile, cart_obj):
created = False
# QUERY for existing order
qs = self.get_queryset().filter(billing_profile=billing_profile, cart=cart_obj, active=True, status='created')
print("QS -> ", qs)
# Found Order
if qs.count() == 1:
# created = False
# variable OBJECT to assign queryset
obj = qs.first()
print("FOUND -> Obj -> ", obj)
else:
# Create object instance
obj = self.model.objects.create(billing_profile=billing_profile, cart=cart_obj)
created = True
print("CREATED -> Obj -> ", obj)
return obj, created
class Order(models.Model):
billing_profile = models.ForeignKey(BillingProfile, null=True, blank=True)
shipping_address = models.ForeignKey(Address, related_name="shipping_address", null=True, blank=True)
billing_address = models.ForeignKey(Address, related_name="billing_address", null=True, blank=True)
cart = models.ForeignKey(Cart)
# pk / id -> unique, random
order_id = models.CharField(max_length=120, blank=True)
status = models.CharField(max_length=120, default='created', choices=ORDER_STATUS_CHOICES)
shipping_total = models.DecimalField(default=5.99, max_digits=7, decimal_places=2)
total = models.DecimalField(default=0.00, max_digits=7, decimal_places=2)
active = models.BooleanField(default=True)
def __str__(self):
return self.order_id
# attach Manager to Order
objects = OrderManager()
# update total instance method
def update_total(self):
# object variables
cart_total = self.cart.total
shipping_total = self.shipping_total
# Fixing data types -> (decimal, float)
new_total = math.fsum([cart_total, shipping_total])
# Format output
formatted_total = format(new_total, '.2f')
# Assign instance
self.total = formatted_total
# Save instance
self.save()
return new_total
# Method to check if the ORDER is complete
def check_done(self):
billing_profile = self.billing_profile
billing_address = self.billing_address
shipping_address = self.shipping_address
total = self.total
if billing_profile and billing_address and shipping_address and total > 0:
return True
return False
def mark_paid(self):
if self.check_done():
# Update ORDER status
self.status = "paid"
self.save()
return self.status
# GENERATE THE ORDER ID
def pre_save_create_order_id(sender, instance, *args, **kwargs):
if not instance.order_id:
instance.order_id = unique_order_id_generator(instance)
# Define Queryset --> Find any existing carts
qs = Order.objects.filter(cart=instance.cart).exclude(billing_profile=instance.billing_profile)
if qs.exists():
print("Found previous cart ... ")
# update previous carts to be in-active
qs.update(active=False)
# Connect Signal
pre_save.connect(pre_save_create_order_id, sender=Order)
# GENERATE THE ORDER TOTAL
def post_save_cart_total(sender, instance, created, *args, **kwargs):
if not created:
cart_obj = instance
cart_total = cart_obj.total
cart_id = cart_obj.id
qs = Order.objects.filter(cart__id=cart_id)
if qs.count() == 1:
order_obj = qs.first()
order_obj.update_total()
# Connect Signal
post_save.connect(post_save_cart_total, sender=Cart)
def post_save_order(sender, instance, created, *args, **kwargs):
print("Saving Order ...")
if created:
print("Updating ... Order Updated")
instance.update_total()
# Connect Signal
post_save.connect(post_save_order, sender=Order)
|
ehoversten/Ecommerce_Django
|
main/apps/orders/models.py
|
models.py
|
py
| 4,469 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Manager",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "apps.billing.models.BillingProfile",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "apps.addresses.models.Address",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "apps.addresses.models.Address",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "apps.carts.models.Cart",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "math.fsum",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "main.utils.unique_order_id_generator",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_save.connect",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_save",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "apps.carts.models.Cart",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 126,
"usage_type": "name"
}
] |
41409285856
|
import json
estudantes = []
professores = []
disciplinas = []
turmas = []
matriculas = []
def main():
while True:
print("Menu Principal")
print("1. Estudantes")
print("2. Disciplinas")
print("3. Professores")
print("4. Turmas")
print("5. Matrículas")
print("6. Sair")
opcao_principal = input("Escolha uma opção: ")
if opcao_principal == "1":
print("Você escolheu a opção Estudantes.")
menu_operacoes_estudantes()
elif opcao_principal == "2":
print("Você escolheu a opção Disciplinas.")
menu_operacoes_disciplinas()
elif opcao_principal == "3":
print("Você escolheu a opção Professores.")
menu_operacoes_professores()
elif opcao_principal == "4":
print("Você escolheu a opção Turmas.")
menu_operacoes_turmas()
elif opcao_principal == "5":
print("Você escolheu a opção Matrículas.")
menu_operacoes_matriculas()
elif opcao_principal == "6":
print("Saindo...")
break
else:
print("Opção inválida. Tente novamente.")
def menu_operacoes_estudantes():
while True:
print("\nMenu de Operações - Estudantes")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_estudante()
elif opcao_operacoes == "2":
listar_estudantes()
elif opcao_operacoes == "3":
atualizar_estudante()
elif opcao_operacoes == "4":
excluir_estudante()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_estudante():
codigo = int(input("\nDigite o código do estudante: "))
nome = input("\nDigite o nome do estudante: ")
cpf = input("\nDigite o CPF do estudante: ")
estudantes = recuperar_estudantes()
estudantes.append({"codigo": codigo, "nome": nome, "cpf": cpf})
salvar_estudantes(estudantes)
print(f"Estudante {nome} incluído com sucesso!")
def listar_estudantes():
estudantes = recuperar_estudantes()
if len(estudantes) == 0:
print("\nNão há estudantes cadastrados.")
else:
print("\nEstudantes cadastrados:")
for estudante in estudantes:
print(f"- Código: {estudante['codigo']}, Nome: {estudante['nome']}, CPF: {estudante['cpf']}")
def atualizar_estudante():
codigo = int(input("\nDigite o código do estudante que deseja atualizar: "))
estudantes = recuperar_estudantes()
for estudante in estudantes:
if estudante["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código do estudante: "))
novo_nome = input("\nDigite o novo nome do estudante: ")
novo_cpf = input("\nDigite o novo CPF do estudante: ")
estudante["codigo"] = novo_codigo
estudante["nome"] = novo_nome
estudante["cpf"] = novo_cpf
salvar_estudantes(estudantes)
print(f"Estudante {codigo} atualizado com sucesso!")
return
print(f"Estudante com código {codigo} não encontrado.")
def excluir_estudante():
codigo = int(input("\nDigite o código do estudante que deseja excluir: "))
estudantes = recuperar_estudantes()
for i, estudante in enumerate(estudantes):
if estudante["codigo"] == codigo:
del estudantes[i]
salvar_estudantes(estudantes)
print(f"Estudante {codigo} excluído com sucesso!")
return
print(f"Estudante com código {codigo} não encontrado.")
def salvar_estudantes(estudantes):
with open('estudantes.json', 'w') as f:
json.dump(estudantes, f)
def recuperar_estudantes():
try:
with open('estudantes.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_professores():
while True:
print("\nMenu de Operações - Professores")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_professor()
elif opcao_operacoes == "2":
listar_professores()
elif opcao_operacoes == "3":
atualizar_professor()
elif opcao_operacoes == "4":
excluir_professor()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_professor():
codigo = int(input("\nDigite o código do professor: "))
nome = input("\nDigite o nome do professor: ")
cpf = input("\nDigite o CPF do professor: ")
professores = recuperar_professores()
professores.append({"codigo": codigo, "nome": nome, "cpf": cpf})
salvar_professores(professores)
print(f"Professor {nome} incluído com sucesso!")
def listar_professores():
professores = recuperar_professores()
if len(professores) == 0:
print("\nNão há professores cadastrados.")
else:
print("\nProfessores cadastrados:")
for professor in professores:
print(f"- Código: {professor['codigo']}, Nome: {professor['nome']}, CPF: {professor['cpf']}")
def atualizar_professor():
codigo = int(input("\nDigite o código do professor que deseja atualizar: "))
professores = recuperar_professores()
for professor in professores:
if professor["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código do professor: "))
novo_nome = input("\nDigite o novo nome do professor: ")
novo_cpf = input("\nDigite o novo CPF do professor: ")
professor["codigo"] = novo_codigo
professor["nome"] = novo_nome
professor["cpf"] = novo_cpf
salvar_professores(professores)
print(f"Professor {codigo} atualizado com sucesso!")
return
print(f"Professor com código {codigo} não encontrado.")
def excluir_professor():
codigo = int(input("\nDigite o código do professor que deseja excluir: "))
professores = recuperar_professores()
for i, professor in enumerate(professores):
if professor["codigo"] == codigo:
del professores[i]
salvar_professores(professores)
print(f"Professor {codigo} excluído com sucesso!")
return
print(f"Professor com código {codigo} não encontrado.")
def salvar_professores(professores):
with open('professores.json', 'w') as f:
json.dump(professores, f)
def recuperar_professores():
try:
with open('professores.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_disciplinas():
while True:
print("\nMenu de Operações - Disciplinas")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_disciplina()
elif opcao_operacoes == "2":
listar_disciplinas()
elif opcao_operacoes == "3":
atualizar_disciplina()
elif opcao_operacoes == "4":
excluir_disciplina()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_disciplina():
codigo = int(input("\nDigite o código da disciplina: "))
nome = input("\nDigite o nome da disciplina: ")
disciplinas = recuperar_disciplinas()
disciplinas.append({"codigo": codigo, "nome": nome})
salvar_disciplinas(disciplinas)
print(f"Disciplina {nome} incluída com sucesso!")
def listar_disciplinas():
disciplinas = recuperar_disciplinas()
if len(disciplinas) == 0:
print("\nNão há disciplinas cadastradas.")
else:
print("\nDisciplinas cadastradas:")
for disciplina in disciplinas:
print(f"- Código: {disciplina['codigo']}, Nome: {disciplina['nome']}")
def atualizar_disciplina():
codigo = int(input("\nDigite o código da disciplina que deseja atualizar: "))
disciplinas = recuperar_disciplinas()
for disciplina in disciplinas:
if disciplina["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código da disciplina: "))
novo_nome = input("\nDigite o novo nome da disciplina: ")
disciplina["codigo"] = novo_codigo
disciplina["nome"] = novo_nome
salvar_disciplinas(disciplinas)
print(f"Disciplina {codigo} atualizada com sucesso!")
return
print(f"Disciplina com código {codigo} não encontrada.")
def excluir_disciplina():
codigo = int(input("\nDigite o código da disciplina que deseja excluir: "))
disciplinas = recuperar_disciplinas()
for i, disciplina in enumerate(disciplinas):
if disciplina["codigo"] == codigo:
del disciplinas[i]
salvar_disciplinas(disciplinas)
print(f"Disciplina {codigo} excluída com sucesso!")
return
print(f"Disciplina com código {codigo} não encontrada.")
def salvar_disciplinas(disciplinas):
with open('disciplinas.json', 'w') as f:
json.dump(disciplinas, f)
def recuperar_disciplinas():
try:
with open('disciplinas.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_turmas():
while True:
print("\nMenu de Operações - Turmas")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_turma()
elif opcao_operacoes == "2":
listar_turmas()
elif opcao_operacoes == "3":
atualizar_turma()
elif opcao_operacoes == "4":
excluir_turma()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_turma():
codigo = int(input("\nDigite o código da turma: "))
codigo_professor = int(input("\nDigite o código do professor: "))
codigo_disciplina = int(input("\nDigite o código da disciplina: "))
professores = recuperar_professores()
if not any(professor["codigo"] == codigo_professor for professor in professores):
print(f"Professor com código {codigo_professor} não encontrado.")
return
disciplinas = recuperar_disciplinas()
if not any(disciplina["codigo"] == codigo_disciplina for disciplina in disciplinas):
print(f"Disciplina com código {codigo_disciplina} não encontrada.")
return
turmas = recuperar_turmas()
turmas.append({"codigo": codigo, "codigo_professor": codigo_professor, "codigo_disciplina": codigo_disciplina})
salvar_turmas(turmas)
print(f"Turma {codigo} incluída com sucesso!")
def listar_turmas():
turmas = recuperar_turmas()
if len(turmas) == 0:
print("\nNão há turmas cadastradas.")
else:
print("\nTurmas cadastradas:")
for turma in turmas:
print(f"- Código: {turma['codigo']}, Código do Professor: {turma['codigo_professor']}, Código da Disciplina: {turma['codigo_disciplina']}")
def atualizar_turma():
codigo = int(input("\nDigite o código da turma que deseja atualizar: "))
turmas = recuperar_turmas()
for turma in turmas:
if turma["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código da turma: "))
novo_codigo_professor = int(input("\nDigite o novo código do professor: "))
novo_codigo_disciplina = int(input("\nDigite o novo código da disciplina: "))
professores = recuperar_professores()
if not any(professor["codigo"] == novo_codigo_professor for professor in professores):
print(f"Professor com código {novo_codigo_professor} não encontrado.")
return
disciplinas = recuperar_disciplinas()
if not any(disciplina["codigo"] == novo_codigo_disciplina for disciplina in disciplinas):
print(f"Disciplina com código {novo_codigo_disciplina} não encontrada.")
return
turma["codigo"] = novo_codigo
turma["codigo_professor"] = novo_codigo_professor
turma["codigo_disciplina"] = novo_codigo_disciplina
salvar_turmas(turmas)
print(f"Turma {codigo} atualizada com sucesso!")
return
print(f"Turma com código {codigo} não encontrada.")
def excluir_turma():
codigo = int(input("\nDigite o código da turma que deseja excluir: "))
turmas = recuperar_turmas()
for i, turma in enumerate(turmas):
if turma["codigo"] == codigo:
del turmas[i]
salvar_turmas(turmas)
print(f"Turma {codigo} excluída com sucesso!")
return
print(f"Turma com código {codigo} não encontrada.")
def salvar_turmas(turmas):
with open('turmas.json', 'w') as f:
json.dump(turmas, f)
def recuperar_turmas():
try:
with open('turmas.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_matriculas():
while True:
print("\nMenu de Operações - Matrículas")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_matricula()
elif opcao_operacoes == "2":
listar_matriculas()
elif opcao_operacoes == "3":
atualizar_matricula()
elif opcao_operacoes == "4":
excluir_matricula()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_matricula():
codigo_turma = int(input("\nDigite o código da turma: "))
codigo_estudante = int(input("\nDigite o código do estudante: "))
turmas = recuperar_turmas()
if not any(turma["codigo"] == codigo_turma for turma in turmas):
print(f"Turma com código {codigo_turma} não encontrada.")
return
estudantes = recuperar_estudantes()
if not any(estudante["codigo"] == codigo_estudante for estudante in estudantes):
print(f"Estudante com código {codigo_estudante} não encontrado.")
return
matriculas = recuperar_matriculas()
matriculas.append({"codigo_turma": codigo_turma, "codigo_estudante": codigo_estudante})
salvar_matriculas(matriculas)
print(f"Matrícula na turma {codigo_turma} incluída com sucesso!")
def listar_matriculas():
matriculas = recuperar_matriculas()
if len(matriculas) == 0:
print("\nNão há matrículas cadastradas.")
else:
print("\nMatrículas cadastradas:")
for matricula in matriculas:
print(f"- Código da Turma: {matricula['codigo_turma']}, Código do Estudante: {matricula['codigo_estudante']}")
def atualizar_matricula():
codigo_turma = int(input("\nDigite o código da turma da matrícula que deseja atualizar: "))
codigo_estudante = int(input("\nDigite o código do estudante da matrícula que deseja atualizar: "))
matriculas = recuperar_matriculas()
for matricula in matriculas:
if matricula["codigo_turma"] == codigo_turma and matricula["codigo_estudante"] == codigo_estudante:
novo_codigo_turma = int(input("\nDigite o novo código da turma: "))
novo_codigo_estudante = int(input("\nDigite o novo código do estudante: "))
turmas = recuperar_turmas()
if not any(turma["codigo"] == novo_codigo_turma for turma in turmas):
print(f"Turma com código {novo_codigo_turma} não encontrada.")
return
estudantes = recuperar_estudantes()
if not any(estudante["codigo"] == novo_codigo_estudante for estudante in estudantes):
print(f"Estudante com código {novo_codigo_estudante} não encontrado.")
return
matricula["codigo_turma"] = novo_codigo_turma
matricula["codigo_estudante"] = novo_codigo_estudante
salvar_matriculas(matriculas)
print(f"Matrícula na turma {codigo_turma} atualizada com sucesso!")
return
print(f"Matrícula na turma {codigo_turma} com estudante de código {codigo_estudante} não encontrada.")
def excluir_matricula():
codigo_turma = int(input("\nDigite o código da turma da matrícula que deseja excluir: "))
codigo_estudante = int(input("\nDigite o código do estudante da matrícula que deseja excluir: "))
matriculas = recuperar_matriculas()
for i, matricula in enumerate(matriculas):
if matricula["codigo_turma"] == codigo_turma and matricula["codigo_estudante"] == codigo_estudante:
del matriculas[i]
salvar_matriculas(matriculas)
print(f"Matrícula na turma {codigo_turma} excluída com sucesso!")
return
print(f"Matrícula na turma {codigo_turma} com estudante de código {codigo_estudante} não encontrada.")
def salvar_matriculas(matriculas):
with open('matriculas.json', 'w') as f:
json.dump(matriculas, f)
def recuperar_matriculas():
try:
with open('matriculas.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
if __name__ == "__main__":
main()
|
enzupain/Python-Projetos
|
sistema gerenciamento academico.py
|
sistema gerenciamento academico.py
|
py
| 18,786 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dump",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 518,
"usage_type": "call"
}
] |
60822349
|
"""
scrapy1.5限制request.callback and request.errback不能为非None以外的任何非可调用对象,导致一些功能无法实现。这里解除该限制
"""
from scrapy import Request as _Request
from scrapy.http.headers import Headers
class Request(_Request):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
assert callback or not errback, "Cannot use errback without a callback"
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
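# Illustrative helper (added; not part of the original project). Because callback/errback
# may now be non-callables such as method names, a downstream component could resolve them
# lazily against the spider. The attribute-lookup convention below is an assumption, not
# an existing Scrapy API.
def resolve_callback(spider, callback):
    """Return a callable for `callback`, which may already be callable or a method name."""
    if callback is None or callable(callback):
        return callback
    return getattr(spider, str(callback))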
|
ShichaoMa/structure_spider
|
structor/custom_request.py
|
custom_request.py
|
py
| 1,255 |
python
|
en
|
code
| 29 |
github-code
|
6
|
[
{
"api_name": "scrapy.Request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "scrapy.http.headers.Headers",
"line_number": 25,
"usage_type": "call"
}
] |
37559653754
|
from selenium import webdriver
import time
# Have to change the path according to where your chromedriver locate
PATH = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(PATH)
driver.get("http://ec2-54-208-152-154.compute-1.amazonaws.com/")
arrayOfBar = []
arrayLeftBowl = []
arrayRightBowl = []
n = 9
for i in range(n):
arrayLeftBowl.append(driver.find_element_by_id("left_" + str(i)))
arrayRightBowl.append(driver.find_element_by_id("right_" + str(i)))
arrayOfBar.append(driver.find_element_by_id("coin_" + str(i)))
"""
This problem is best to divide and conquer. It is suited for Binary Search Algorithm.
We can divide the array of gold bar into three locations. Left table, mid, and the right table.
If the left table and right table are equal weight then it mean the mid is FAKE GOLD.
But if the left table is less than the right table. Then we would toss everthing from mid + 1
to n (size of array). Or if the left table is greater than the right table, then we would toss everything from 0 to mid - 1.
Doing this we are dividing the search item by half of the size of the array and conquer it by picking the table that is less than.
This would give us time complexity of O(logn) time.
"""
low = 0
high = len(arrayOfBar) - 1
while(low < high):
mid = int(low + ((high - low) / 2))
# reset the table
driver.find_element_by_xpath("/html/body/div/div/div[1]/div[4]/button[1]").click()
j = 0
for i in range (low, mid):
# setting the left table
arrayLeftBowl[j].send_keys(i)
j += 1
j = 0
for i in range (mid + 1, high + 1):
# setting the right table
arrayRightBowl[j].send_keys(i)
j += 1
# Weight the item
driver.find_element_by_xpath("/html/body/div/div/div[1]/div[4]/button[2]").click()
time.sleep(5)
# getting the result after weight
result = driver.find_element_by_xpath("/html/body/div/div/div[1]/div[2]/button").text
if(j == 1):
if(result == "<"):
print("Fake gold is " + str(low))
arrayOfBar[low].click()
break
elif(result == ">"):
print("Fake gold is " + str(high))
arrayOfBar[high].click()
break
if(result == "="):
print("Fake gold is " + str(mid))
arrayOfBar[mid].click()
break
    elif (result == ">"):
        low = mid
    else:
        high = mid
time.sleep(3)
driver.quit()
|
LiyaNorng/Fetch-Rewards-Coding-Exercise
|
FakeGold.py
|
FakeGold.py
|
py
| 2,344 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 71,
"usage_type": "call"
}
] |
32509281023
|
import torch
import torch.nn as nn  # imported to use nn.Linear
# F.mse_loss (mean squared error) is the loss function used for linear regression
# For classification problems the usual loss function is cross-entropy
import torch.nn.functional as F
import torch.optim as optim  # optimizers such as SGD, Adam, etc.
# Create some sample data
# (the simplest case would be input 1, output 1;
#  here we do multi-variable linear regression: input 3, output 1)
# Create the input (x_train) as a 4x3 2D tensor
x_train = torch.FloatTensor([[90, 73, 89],
[66, 92, 83],
[86, 87, 78],
[85, 96, 75]])
# y_train (GT)
y_train = torch.FloatTensor([[152],
[185],
[100],
[193]])
# Declare and initialize the model
# y = WX (w1*x1 + w2*x2 ... wn*xn + b)
# nn.Linear(input_dim, output_dim)
# Initialization:
# w = randn(1)
# model.parameters (weight: 3, bias: 1)
# weight and bias are automatically initialized with random values
model = nn.Linear(3, 1)  # see get_weights()
# model.parameters() is what we optimize; since we differentiate with respect to w and b,
# they are created with requires_grad=True
print(list(model.parameters()))
optimizer = optim.SGD(model.parameters(), lr=0.01)  # learning rate: tune it by experiment
# Set the number of iterations (number of epochs)
# epoch: one pass of gradient descent over the whole training data (2000 passes, updating w and b)
nb_epochs = 2000
for epoch in range(nb_epochs+1):
    # Compute H(x): run one wx+b pass and assign the result to pred
    # x_train is the input data; w and b are the current parameters
    # prediction = w*x_train + b
    pred = model(x_train)
    # Compute the cost (loss function: mean squared error)
    # cost function / loss function --> cost, loss, error
    # mse = mean((y - y_hat)**2)
    cost = F.mse_loss(pred, y_train)  # y_train is the ground truth
    # Use SGD to move toward the optimum (adjust w and b)
    optimizer.zero_grad()  # without zeroing first, gradients accumulate across iterations
    cost.backward()  # compute the actual gradient values
    optimizer.step()  # update w and b
    # Log every 100 epochs
    if epoch % 100 == 0:
        tmp = list(model.parameters())
        print(f'Epoch: {epoch:4d} Cost : {cost.item(): .6f}')
        print(f'w, b: {tmp[0]}, {tmp[1]}')
new_var = torch.FloatTensor([[73, 80, 75]])
# If the output is close to 152, the model is judged to have trained well.
pred_y = model(new_var) # model.forward(new_var)
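# Illustrative check (added): print the prediction for the held-out sample; a value
# near 152 suggests the fit is reasonable for this input.
print(f'prediction for {new_var.tolist()}: {pred_y.item():.4f}')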
|
JEONJinah/Shin
|
multi_varialbe_LR.py
|
multi_varialbe_LR.py
|
py
| 2,761 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.FloatTensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.FloatTensor",
"line_number": 62,
"usage_type": "call"
}
] |
24452709455
|
import json
import os
import random
from nonebot import on_keyword, logger
from nonebot.adapters.mirai2 import MessageSegment, Bot, Event
tarot = on_keyword({"塔罗牌"}, priority=5)
@tarot.handle()
async def send_tarot(bot: Bot, event: Event):
"""塔罗牌"""
card, filename = await get_random_tarot()
image_dir = random.choice(['normal', 'reverse'])
card_type = '正位' if image_dir == 'normal' else '逆位'
content = f"{card['name']} ({card['name-en']}) {card_type}\n牌意:{card['meaning'][image_dir]}"
elements = []
img_path = os.path.join(f"{os.getcwd()}", "warfarin", "plugins", "Tarot", "resource", f"{image_dir}",
f"{filename}.jpg")
logger.debug(f"塔罗牌图片:{img_path}")
if filename and os.path.exists(img_path):
elements.append(MessageSegment.image(path=img_path))
elements.append(MessageSegment.plain(content))
await tarot.finish(elements)
async def get_random_tarot():
# path = f"{os.getcwd()}/warfarin/plugins/Tarot/resource/tarot.json"
path = os.path.join(f"{os.getcwd()}", "warfarin", "plugins", "Tarot", "resource", "tarot.json")
with open(path, 'r', encoding='utf-8') as json_file:
data = json.load(json_file)
kinds = ['major', 'pentacles', 'wands', 'cups', 'swords']
cards = []
for kind in kinds:
cards.extend(data[kind])
card = random.choice(cards)
filename = ''
for kind in kinds:
if card in data[kind]:
filename = '{}{:02d}'.format(kind, card['num'])
break
return card, filename
|
mzttsaintly/Warfarin-bot
|
warfarin/plugins/Tarot/__init__.py
|
__init__.py
|
py
| 1,590 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "nonebot.on_keyword",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "nonebot.adapters.mirai2.Bot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "nonebot.adapters.mirai2.Event",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nonebot.logger.debug",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nonebot.logger",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "nonebot.adapters.mirai2.MessageSegment.image",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "nonebot.adapters.mirai2.MessageSegment",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "nonebot.adapters.mirai2.MessageSegment.plain",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nonebot.adapters.mirai2.MessageSegment",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 38,
"usage_type": "call"
}
] |
12959904969
|
from .declarative import (
declarative,
get_declared,
get_members,
)
from .dispatch import dispatch
from .evaluate import (
evaluate,
evaluate_recursive,
evaluate_recursive_strict,
evaluate_strict,
get_callable_description,
matches,
)
from .namespace import (
EMPTY,
flatten,
flatten_items,
getattr_path,
Namespace,
setattr_path,
setdefaults_path,
)
from .refinable import (
refinable,
Refinable,
RefinableObject,
)
from .shortcut import (
class_shortcut,
get_shortcuts_by_name,
is_shortcut,
shortcut,
Shortcut,
)
from .sort_after import (
LAST,
sort_after,
)
from .with_meta import with_meta
__version__ = '5.7.0'
__all__ = [
'assert_kwargs_empty',
'class_shortcut',
'declarative',
'dispatch',
'EMPTY',
'evaluate',
'evaluate_strict',
'evaluate_recursive',
'evaluate_recursive_strict',
'filter_show_recursive',
'flatten',
'flatten_items',
'full_function_name',
'get_shortcuts_by_name',
'getattr_path',
'get_members',
'is_shortcut',
'LAST',
'matches',
'Namespace',
'remove_show_recursive',
'refinable',
'Refinable',
'RefinableObject',
'setattr_path',
'setdefaults_path',
'shortcut',
'Shortcut',
'should_show',
'sort_after',
'with_meta',
]
def should_show(item):
try:
r = item.show
except AttributeError:
try:
r = item['show']
except (TypeError, KeyError):
return True
if callable(r):
assert False, "`show` was a callable. You probably forgot to evaluate it. The callable was: {}".format(get_callable_description(r))
return r
def filter_show_recursive(item):
if isinstance(item, list):
return [filter_show_recursive(v) for v in item if should_show(v)]
if isinstance(item, dict):
        # The type(item)(**...) construction is there to preserve the original mapping type
return type(item)(**{k: filter_show_recursive(v) for k, v in dict.items(item) if should_show(v)})
if isinstance(item, set):
return {filter_show_recursive(v) for v in item if should_show(v)}
return item
def remove_keys_recursive(item, keys_to_remove):
if isinstance(item, list):
return [remove_keys_recursive(v, keys_to_remove) for v in item]
if isinstance(item, set):
return {remove_keys_recursive(v, keys_to_remove) for v in item}
if isinstance(item, dict):
return {k: remove_keys_recursive(v, keys_to_remove) for k, v in dict.items(item) if k not in keys_to_remove}
return item
def remove_show_recursive(item):
return remove_keys_recursive(item, {'show'})
def assert_kwargs_empty(kwargs):
if kwargs:
import traceback
function_name = traceback.extract_stack()[-2][2]
raise TypeError('%s() got unexpected keyword arguments %s' % (function_name, ', '.join(["'%s'" % x for x in sorted(kwargs.keys())])))
def full_function_name(f):
return '%s.%s' % (f.__module__, f.__name__)
def generate_rst_docs(directory, classes, missing_objects=None): # pragma: no coverage
"""
Generate documentation for tri.declarative APIs
:param directory: directory to write the .rst files into
:param classes: list of classes to generate documentation for
:param missing_objects: tuple of objects to count as missing markers, if applicable
"""
doc_by_filename = _generate_rst_docs(classes=classes, missing_objects=missing_objects) # pragma: no mutate
for filename, doc in doc_by_filename: # pragma: no mutate
with open(directory + filename, 'w') as f2: # pragma: no mutate
f2.write(doc) # pragma: no mutate
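# Illustrative call (added; the directory and class list here are hypothetical):
# generate_rst_docs(directory='docs', classes=[RefinableObject])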
# noinspection PyShadowingNames
def _generate_rst_docs(classes, missing_objects=None):
if missing_objects is None:
missing_objects = tuple()
import re
def docstring_param_dict(obj):
# noinspection PyShadowingNames
doc = obj.__doc__
if doc is None:
return dict(text=None, params={})
return dict(
text=doc[:doc.find(':param')].strip() if ':param' in doc else doc.strip(),
params=dict(re.findall(r":param (?P<name>\w+): (?P<text>.*)", doc))
)
def indent(levels, s):
return (' ' * levels * 4) + s.strip()
# noinspection PyShadowingNames
def get_namespace(c):
return Namespace(
{k: c.__init__.dispatch.get(k) for k, v in get_declared(c, 'refinable_members').items()})
for c in classes:
from io import StringIO
f = StringIO()
def w(levels, s):
f.write(indent(levels, s))
f.write('\n')
def section(level, title):
underline = {
0: '=',
1: '-',
2: '^',
}[level] * len(title)
w(0, title)
w(0, underline)
w(0, '')
section(0, c.__name__)
class_doc = docstring_param_dict(c)
constructor_doc = docstring_param_dict(c.__init__)
if class_doc['text']:
f.write(class_doc['text'])
w(0, '')
if constructor_doc['text']:
if class_doc['text']:
w(0, '')
f.write(constructor_doc['text'])
w(0, '')
w(0, '')
section(1, 'Refinable members')
# noinspection PyCallByClass
for refinable_, value in sorted(dict.items(get_namespace(c))):
w(0, '* `' + refinable_ + '`')
if constructor_doc['params'].get(refinable_):
w(1, constructor_doc['params'][refinable_])
w(0, '')
w(0, '')
defaults = Namespace()
for refinable_, value in sorted(get_namespace(c).items()):
if value not in (None,) + missing_objects:
defaults[refinable_] = value
if defaults:
section(2, 'Defaults')
for k, v in sorted(flatten_items(defaults)):
if v != {}:
if '<lambda>' in repr(v):
import inspect
v = inspect.getsource(v)
v = v[v.find('lambda'):]
v = v.strip().strip(',')
elif callable(v):
v = v.__module__ + '.' + v.__name__
if v == '':
v = '""'
w(0, '* `%s`' % k)
w(1, '* `%s`' % v)
w(0, '')
shortcuts = get_shortcuts_by_name(c)
if shortcuts:
section(1, 'Shortcuts')
for name, shortcut_ in sorted(shortcuts.items()):
section(2, f'`{name}`')
if shortcut_.__doc__:
doc = shortcut_.__doc__
f.write(doc.strip())
w(0, '')
w(0, '')
yield '/%s.rst' % c.__name__, f.getvalue()
|
jlubcke/tri.declarative
|
lib/tri_declarative/__init__.py
|
__init__.py
|
py
| 6,981 |
python
|
en
|
code
| 17 |
github-code
|
6
|
[
{
"api_name": "evaluate.get_callable_description",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "traceback.extract_stack",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "namespace.Namespace",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "declarative.get_declared",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "namespace.Namespace",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "namespace.flatten_items",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "inspect.getsource",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "shortcut.get_shortcuts_by_name",
"line_number": 247,
"usage_type": "call"
}
] |
17940292131
|
from sklearn.metrics import confusion_matrix, roc_auc_score
import json
import numpy as np
def general_result(y_true, y_score, threshold=0.6):
def pred(score, best_thresh):
label = 0
if score > best_thresh:
label = 1
return label
y_score = np.array(y_score)
if len(y_score.shape) == 2:
y_score = y_score[:,1]
# best_thresh = select_threshold(y_true, y_score)
best_thresh = threshold
y_pred = [pred(score, best_thresh) for score in y_score]
c_m = confusion_matrix(y_true, y_pred)
print("model works on the data, the confusion_matrix is:(Threshold:{})".format(str(best_thresh)), c_m)
acc = (c_m[0, 0]+c_m[1, 1])/(c_m[0, 0]+c_m[0, 1]+c_m[1, 0]+c_m[1, 1])
print("model works on the data, the accuracy is:", acc)
pre = c_m[1, 1]/(c_m[1, 1]+c_m[0, 1])
print("model works on the data, the precision is:", pre)
re = c_m[1, 1]/(c_m[1, 1]+c_m[1, 0])
print("model works on the data, the recall is:", re)
f_score = (2*pre*re)/(pre+re)
print("model works on the data, the F1-score is:", f_score)
#train_label_binary = to_categorical(train_label)
auc = roc_auc_score(y_true, y_score)
print("model works on the data, the auc is:", auc)
def select_threshold(y_true, y_score):
def pred(score, threshold):
label = 0
if score > threshold:
label = 1
return label
best_th = 0
f1_score = 0
output = {'Precision':[], 'Recall':[]}
for i in range(1,100):
threshold = i/100
y_pred = [pred(score, threshold) for score in y_score]
c_m = confusion_matrix(y_true, y_pred)
try:
pre = c_m[1, 1]/(c_m[1, 1]+c_m[0, 1])
re = c_m[1, 1]/(c_m[1, 1]+c_m[1, 0])
output['Precision'].append(pre)
output['Recall'].append((re))
f_score = (2*pre*re)/(pre+re)
if f_score>f1_score :
f1_score = f_score
best_th = threshold
except:
continue
if len(output['Precision']) != 99:
print("Unknown Error occurred when generate results.")
with open('Precision_Recall.txt','w') as w:
w.write(json.dumps(output))
return best_th
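# Illustrative usage with made-up labels and scores (added; not part of the original module):
if __name__ == "__main__":
    general_result([0, 1, 1, 0, 1], [0.2, 0.9, 0.7, 0.4, 0.8], threshold=0.6)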
|
jingmouren/antifraud
|
antifraud/metrics/normal_function.py
|
normal_function.py
|
py
| 2,233 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 59,
"usage_type": "call"
}
] |
32483785153
|
import random
from collections import Counter
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
import nltk
from nltk.cluster.kmeans import KMeansClusterer
from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner,
build_sampler, images_to_levels, multi_apply,
reduce_mean, unmap)
from mmdet.core.utils import filter_scores_and_topk
class attention1d(nn.Module):
def __init__(self, in_planes=1, ratios=16, K=4, temperature=1, init_weight=True): # quality map
super(attention1d, self).__init__()
assert temperature % 3 == 1
if in_planes != 3:
hidden_planes = int(in_planes * ratios)
else:
hidden_planes = K
self.fc1 = nn.Conv2d(in_planes, hidden_planes, 1, bias=False)
# self.bn = nn.BatchNorm2d(hidden_planes)
self.fc2 = nn.Conv2d(hidden_planes, K, 1, bias=True)
self.temperature = temperature
self.K = K
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m ,nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def updata_temperature(self):
if self.temperature!=1:
self.temperature -= 3
print('Change temperature to:', str(self.temperature))
def forward(self, x):
_N, _C, _H, _W = x.size()
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return F.softmax(x / self.temperature, 1)
class Dynamic_conv1d(nn.Module):
'''
Args:
x(Tensor): shape (batch, in_channel, height, width)
quality_map(Tensor): shape (batch, 1, height, width)
Return:
output(Tensor): shape (batch, out_channel, height, width)
Note:
        in_channel must equal out_channel
'''
def __init__(self, in_planes, out_planes, ratio=16.0, stride=1, padding=0, dilation=1, bias=True, K=2,temperature=1, init_weight=True):
super(Dynamic_conv1d, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.stride = stride
self.padding = padding
self.dilation = dilation
self.bias = bias
self.K = K
self.attention = attention1d(1, ratio, K, temperature)
self.weight = nn.Parameter(torch.randn(K, out_planes, in_planes), requires_grad=True)
if bias:
self.bias = nn.Parameter(torch.zeros(K, out_planes))
else:
self.bias = None
if init_weight:
self._initialize_weights()
    # TODO: initialization
def _initialize_weights(self): # maybe problematic
for i in range(self.K):
nn.init.kaiming_uniform_(self.weight[i])
def update_temperature(self):
self.attention.updata_temperature()
def forward(self, x, quality_map):# a different version of dynamic convlution, is another kind of spatial attention
residule = x
batch_size, in_planes, height, width = x.size()
softmax_attention = self.attention(quality_map)
print(f'attention size {softmax_attention.size()}')
print(f'attention {softmax_attention}')
softmax_attention = softmax_attention.permute(0, 2, 3, 1)
print(f'attention size after {softmax_attention.size()}')
print(f'attention after {softmax_attention}')
        # x = x.view(1, -1, width, height)  # flatten into one dimension for grouped convolution
        # weight = self.weight.view(self.K, -1)
        # Generate the dynamic convolution weights: batch_size sets of parameters, each one different
        # weight = weight.view(self.K, self.in_planes, self.out_planes)
# print(f'softmax_attention {softmax_attention.size()}')
# print(f'self.weight {self.weight.size()}')
weight = self.weight.view(self.K, -1)
print(f'weight size {weight.size()}')
print(f'weight {weight}')
aggregate_weight = torch.matmul(softmax_attention, weight).view(batch_size, height, width, self.out_planes, self.in_planes)# (N, H, W, C2, C1)
print(f'aggregate_weight size {aggregate_weight.size()}')
print(f'aggregate_weight {aggregate_weight}')
aggregate_weight = aggregate_weight.permute(3, 0, 4, 1, 2) # (C2, N, C1, H, W)
print(f'aggregate_weight after size {aggregate_weight.size()}')
print(f'aggregate_weight after {aggregate_weight}')
output = aggregate_weight * x[None, :, :, :, :]
# if self.bias is not None:
# aggregate_bias = torch.matmul(softmax_attention, self.bias).permute(0, 3, 1, 2) # (N, C1, H, W)
# print(aggregate_bias.size())
# print(softmax_attention.size())
# output = output + aggregate_bias
output = output.sum(dim=0) # (N, C1, H, W)
return residule + output
dy1 = Dynamic_conv1d(2, 1)
x = torch.tensor([[[[1, 2],[3, 4]],[[5, 6],[7, 8]]]], dtype=torch.float32)
y = torch.tensor([[[[1,2],[3,4]]]], dtype=torch.float32)
print(f'x size {x.size()}')
print(f'x {x}')
print(f'y size {y.size()}')
print(f'y {y}')
result = dy1(x, y)
print(f'output size {result.size()}')
print(f'output {result}')
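# Illustrative shape check (added): because of the residual connection and the sum over the
# output-channel axis, the result keeps the input's (N, C1, H, W) shape in this example.
assert result.size() == x.size()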
|
johnran103/mmdet
|
test_dy_conv.py
|
test_dy_conv.py
|
py
| 5,635 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.kaiming_normal_",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "torch.randn",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.kaiming_uniform_",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "torch.matmul",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 156,
"usage_type": "attribute"
}
] |
18307407152
|
import importlib.util as iutil
import os
from datetime import datetime
from time import perf_counter
from uuid import uuid4
import numpy as np
import yaml
from aequilibrae.distribution.ipf_core import ipf_core
from aequilibrae.context import get_active_project
from aequilibrae.matrix import AequilibraeMatrix, AequilibraeData
from aequilibrae.project.data.matrix_record import MatrixRecord
spec = iutil.find_spec("openmatrix")
has_omx = spec is not None
class Ipf:
"""Iterative proportional fitting procedure
.. code-block:: python
>>> from aequilibrae import Project
>>> from aequilibrae.distribution import Ipf
>>> from aequilibrae.matrix import AequilibraeMatrix, AequilibraeData
>>> project = Project.from_path("/tmp/test_project_ipf")
>>> matrix = AequilibraeMatrix()
# Here we can create from OMX or load from an AequilibraE matrix.
>>> matrix.load('/tmp/test_project/matrices/demand.omx')
>>> matrix.computational_view()
>>> args = {"entries": matrix.zones, "field_names": ["productions", "attractions"],
... "data_types": [np.float64, np.float64], "memory_mode": True}
>>> vectors = AequilibraeData()
>>> vectors.create_empty(**args)
>>> vectors.productions[:] = matrix.rows()[:]
>>> vectors.attractions[:] = matrix.columns()[:]
# We assume that the indices would be sorted and that they would match the matrix indices
>>> vectors.index[:] = matrix.index[:]
>>> args = {
... "matrix": matrix, "rows": vectors, "row_field": "productions", "columns": vectors,
... "column_field": "attractions", "nan_as_zero": False}
>>> fratar = Ipf(**args)
>>> fratar.fit()
# We can get back to our OMX matrix in the end
>>> fratar.output.export("/tmp/to_omx_output.omx")
>>> fratar.output.export("/tmp/to_aem_output.aem")
"""
def __init__(self, project=None, **kwargs):
"""
Instantiates the Ipf problem
:Arguments:
**matrix** (:obj:`AequilibraeMatrix`): Seed Matrix
**rows** (:obj:`AequilibraeData`): Vector object with data for row totals
**row_field** (:obj:`str`): Field name that contains the data for the row totals
**columns** (:obj:`AequilibraeData`): Vector object with data for column totals
**column_field** (:obj:`str`): Field name that contains the data for the column totals
**parameters** (:obj:`str`, optional): Convergence parameters. Defaults to those in the parameter file
**nan_as_zero** (:obj:`bool`, optional): If Nan values should be treated as zero. Defaults to True
:Results:
**output** (:obj:`AequilibraeMatrix`): Result Matrix
**report** (:obj:`list`): Iteration and convergence report
**error** (:obj:`str`): Error description
"""
self.cpus = 0
self.parameters = kwargs.get("parameters", self.__get_parameters("ipf"))
# Seed matrix
self.matrix = kwargs.get("matrix", None) # type: AequilibraeMatrix
# NaN as zero
self.nan_as_zero = kwargs.get("nan_as_zero", True)
# row vector
self.rows = kwargs.get("rows", None)
self.row_field = kwargs.get("row_field", None)
self.output_name = kwargs.get("output", AequilibraeMatrix().random_name())
# Column vector
self.columns = kwargs.get("columns", None)
self.column_field = kwargs.get("column_field", None)
self.output = AequilibraeMatrix()
self.error = None
self.__required_parameters = ["convergence level", "max iterations", "balancing tolerance"]
self.error_free = True
self.report = [" ##### IPF computation ##### ", ""]
self.gap = None
self.procedure_date = ""
self.procedure_id = ""
def __check_data(self):
self.error = None
self.__check_parameters()
# check data types
if not isinstance(self.rows, AequilibraeData):
raise TypeError("Row vector needs to be an instance of AequilibraeData")
if not isinstance(self.columns, AequilibraeData):
raise TypeError("Column vector needs to be an instance of AequilibraeData")
if not isinstance(self.matrix, AequilibraeMatrix):
raise TypeError("Seed matrix needs to be an instance of AequilibraeMatrix")
# Check data type
if not np.issubdtype(self.matrix.dtype, np.floating):
raise ValueError("Seed matrix need to be a float type")
row_data = self.rows.data
col_data = self.columns.data
if not np.issubdtype(row_data[self.row_field].dtype, np.floating):
raise ValueError("production/rows vector must be a float type")
if not np.issubdtype(col_data[self.column_field].dtype, np.floating):
raise ValueError("Attraction/columns vector must be a float type")
# Check data dimensions
if not np.array_equal(self.rows.index, self.columns.index):
raise ValueError("Indices from row vector do not match those from column vector")
if not np.array_equal(self.matrix.index, self.columns.index):
raise ValueError("Indices from vectors do not match those from seed matrix")
# Check if matrix was set for computation
if self.matrix.matrix_view is None:
raise ValueError("Matrix needs to be set for computation")
else:
if len(self.matrix.matrix_view.shape[:]) > 2:
raise ValueError("Matrix' computational view needs to be set for a single matrix core")
if self.error is None:
# check balancing:
sum_rows = np.nansum(row_data[self.row_field])
sum_cols = np.nansum(col_data[self.column_field])
if abs(sum_rows - sum_cols) > self.parameters["balancing tolerance"]:
self.error = "Vectors are not balanced"
else:
# guarantees that they are precisely balanced
col_data[self.column_field][:] = col_data[self.column_field][:] * (sum_rows / sum_cols)
if self.error is not None:
self.error_free = False
def __check_parameters(self):
for i in self.__required_parameters:
if i not in self.parameters:
self.error = "Parameters error. It needs to be a dictionary with the following keys: "
for t in self.__required_parameters:
self.error = self.error + t + ", "
if self.error:
raise ValueError(self.error)
def fit(self):
"""Runs the IPF instance problem to adjust the matrix
Resulting matrix is the *output* class member
"""
self.procedure_id = uuid4().hex
self.procedure_date = str(datetime.today())
t = perf_counter()
self.__check_data()
if self.error_free:
max_iter = self.parameters["max iterations"]
conv_criteria = self.parameters["convergence level"]
if self.matrix.is_omx():
self.output = AequilibraeMatrix()
self.output.create_from_omx(
self.output.random_name(), self.matrix.file_path, cores=self.matrix.view_names
)
self.output.computational_view()
else:
self.output = self.matrix.copy(self.output_name, memory_only=True)
if self.nan_as_zero:
self.output.matrix_view[:, :] = np.nan_to_num(self.output.matrix_view)[:, :]
rows = self.rows.data[self.row_field]
columns = self.columns.data[self.column_field]
tot_matrix = np.nansum(self.output.matrix_view[:, :])
# Reporting
self.report.append("Target convergence criteria: " + str(conv_criteria))
self.report.append("Maximum iterations: " + str(max_iter))
self.report.append("")
self.report.append("Rows:" + str(self.rows.entries))
self.report.append("Columns: " + str(self.columns.entries))
self.report.append("Total of seed matrix: " + "{:28,.4f}".format(float(tot_matrix)))
self.report.append("Total of target vectors: " + "{:25,.4f}".format(float(np.nansum(rows))))
self.report.append("")
self.report.append("Iteration, Convergence")
self.gap = conv_criteria + 1
seed = np.array(self.output.matrix_view[:, :], copy=True)
iter, self.gap = ipf_core(
seed, rows, columns, max_iterations=max_iter, tolerance=conv_criteria, cores=self.cpus
)
self.output.matrix_view[:, :] = seed[:, :]
self.report.append(str(iter) + " , " + str("{:4,.10f}".format(float(np.nansum(self.gap)))))
self.report.append("")
self.report.append("Running time: " + str("{:4,.3f}".format(perf_counter() - t)) + "s")
def save_to_project(self, name: str, file_name: str, project=None) -> MatrixRecord:
"""Saves the matrix output to the project file
:Arguments:
**name** (:obj:`str`): Name of the desired matrix record
**file_name** (:obj:`str`): Name for the matrix file name. AEM and OMX supported
**project** (:obj:`Project`, Optional): Project we want to save the results to.
Defaults to the active project
"""
project = project or get_active_project()
mats = project.matrices
record = mats.new_record(name, file_name, self.output)
record.procedure_id = self.procedure_id
record.timestamp = self.procedure_date
record.procedure = "Iterative Proportional fitting"
record.save()
return record
def __tot_rows(self, matrix):
return np.nansum(matrix, axis=1)
def __tot_columns(self, matrix):
return np.nansum(matrix, axis=0)
def __factor(self, marginals, targets):
f = np.divide(targets, marginals) # We compute the factors
f[f == np.NINF] = 1 # And treat the errors
return f
def __get_parameters(self, model):
path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
with open(path + "/parameters.yml", "r") as yml:
path = yaml.safe_load(yml)
self.cpus = int(path["system"]["cpus"])
return path["distribution"][model]
|
AequilibraE/aequilibrae
|
aequilibrae/distribution/ipf.py
|
ipf.py
|
py
| 10,544 |
python
|
en
|
code
| 140 |
github-code
|
6
|
[
{
"api_name": "importlib.util.find_spec",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "importlib.util",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "aequilibrae.matrix.AequilibraeMatrix",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "aequilibrae.matrix.AequilibraeMatrix",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "aequilibrae.matrix.AequilibraeData",
"line_number": 119,
"usage_type": "argument"
},
{
"api_name": "aequilibrae.matrix.AequilibraeData",
"line_number": 122,
"usage_type": "argument"
},
{
"api_name": "aequilibrae.matrix.AequilibraeMatrix",
"line_number": 125,
"usage_type": "argument"
},
{
"api_name": "numpy.issubdtype",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.floating",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "numpy.issubdtype",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.floating",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "numpy.issubdtype",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.floating",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.array_equal",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "aequilibrae.matrix.AequilibraeMatrix",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.nan_to_num",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "aequilibrae.distribution.ipf_core.ipf_core",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "aequilibrae.context.get_active_project",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "aequilibrae.project.data.matrix_record.MatrixRecord",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "numpy.nansum",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "numpy.NINF",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 262,
"usage_type": "call"
}
] |
29007933984
|
# Databricks notebook source
from pyspark.sql.functions import expr, col
import pyspark.sql.functions as fn
sampleEmployee = spark.read.format("csv").option("header","true").load("dbfs:/FileStore/shared_uploads/[email protected]/us_500.csv")
# COMMAND ----------
employeeDF = sampleEmployee.withColumn('web', expr('explode(array_repeat(web,100))'))
# COMMAND ----------
employeeDF_grouped = employeeDF.groupby(['city'])
CityEmployeeDensity = employeeDF_grouped.agg(fn.count(col('email')).alias('countOfEmployees'))
# COMMAND ----------
employeeDF.createOrReplaceTempView("employeeDataFrame")
CityEmployeeDensity.createOrReplaceTempView("CityEmpDensity")
sequenceOfCityDF = sqlContext.sql(" select city, countOfEmployees, rank() over(order by countOfEmployees desc, city) as Sequence from CityEmpDensity ")
sequenceOfCityDF.createOrReplaceTempView("sequenceOfCityDataFrame")
VaccinationDrivePlan = sqlContext.sql(" SELECT EDF.*, SDF.Sequence FROM employeeDataFrame EDF INNER JOIN sequenceOfCityDataFrame SDF ON EDF.city = SDF.city ")
VaccinationDrivePlan.show()
# COMMAND ----------
VaccinationDrivePlan.createOrReplaceTempView("VaccinationlPlan")
noOfDaysVaccineDrive = sqlContext.sql("SELECT city, countOfEmployees, CEILING(countOfEmployees/100) as noOfDaysToCompleteVaccination FROM CityEmpDensity")
filnalVaccineDrive = noOfDaysVaccineDrive.withColumn('noOfDaysToCompleteVaccination', expr('explode(array_repeat(noOfDaysToCompleteVaccination,int(noOfDaysToCompleteVaccination)))'))
filnalVaccineDrive.createOrReplaceTempView("filnalVaccineDrive")
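# Worked example (added, illustrative): a city with 250 employees needs CEILING(250/100) = 3
# vaccination days at ~100 employees per day, so array_repeat/explode emits 3 rows for that
# city and the ROW_NUMBER() window in the next cells turns each row into one scheduled date.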
# COMMAND ----------
filnalVaccineSchedule_Sequential = sqlContext.sql("SELECT city,countOfEmployees AS countOfEmployeesOfCity, current_date() + ROW_NUMBER() OVER(order by countOfEmployees desc ) - 1 AS VaccineScheduleDate FROM filnalVaccineDrive")
filnalVaccineSchedule_Sequential.show()
# COMMAND ----------
filnalVaccineSchedule_Parallel = sqlContext.sql("SELECT city,countOfEmployees AS countOfEmployeesOfCity, current_date() + ROW_NUMBER() OVER(partition by city order by countOfEmployees desc ) - 1 AS VaccineScheduleDate FROM filnalVaccineDrive")
filnalVaccineSchedule_Parallel.show()
# COMMAND ----------
noOfDaysVaccineDriveForCity = noOfDaysVaccineDrive
noOfDaysVaccineDriveForCity.show()
# COMMAND ----------
|
bhaskar553/DatabricksAssignment
|
Vaccine Drive Assignment.py
|
Vaccine Drive Assignment.py
|
py
| 2,302 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyspark.sql.functions.expr",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.count",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.expr",
"line_number": 31,
"usage_type": "call"
}
] |
43734225885
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 19:03:44 2021
@author: Samael Olascoaga
@email: [email protected]
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('drugbank.csv')
overlap = []
for i in range(0, 1000000):
set1 = set(df['ID'].sample(n=550, replace=True))
set2 = set(df['ID'].sample(n=409, replace=True))
overlap.append(len(set1.intersection(set2)))
overlap = np.asarray(overlap, dtype=float)
# Empirical p-value: fraction of bootstrap resamples whose overlap is at least 182
p = (overlap >= 182).sum() / len(overlap)
print(p)
sns.set_style("white")
sns.despine()
#sns.distplot(degree_list, kde=False, rug=False)
g = sns.histplot(overlap, log_scale=False, fill=False, color='k', bins=17)
sns.despine()
plt.ylabel("Frequency")
plt.xlabel("Overlap")
#plt.title("")
sns.despine()
fig = g.get_figure()
fig.savefig(r'target_bootstrap' + '.svg', format='svg', dpi=600, bbox_inches="tight")
|
Olascoaga/Senotherapy
|
bootstrapping_targets.py
|
bootstrapping_targets.py
|
py
| 938 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "seaborn.despine",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "seaborn.histplot",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "seaborn.despine",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "seaborn.despine",
"line_number": 33,
"usage_type": "call"
}
] |
29643271631
|
# -*- coding: utf-8 -*-
# (c) 2015 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
from dateutil.relativedelta import relativedelta
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
@api.multi
def _compute_protect_date_planned(self):
for proc in self:
proc.protect_date_planned = False
if (proc.purchase_line_id and
proc.purchase_line_id.order_id.state != 'draft'):
proc.protect_date_planned = True
plan = fields.Many2one('procurement.plan', string='Plan')
location_type = fields.Selection([
('supplier', 'Supplier Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
string='Location Type', related="location_id.usage", store=True)
protect_date_planned = fields.Boolean(
string='Protect Date Planned', compute='_compute_protect_date_planned')
@api.model
def create(self, data):
if 'plan' in self.env.context and 'plan' not in data:
data['plan'] = self.env.context.get('plan')
procurement = super(ProcurementOrder, self).create(data)
return procurement
@api.multi
def button_remove_plan(self):
self.ensure_one()
template_obj = self.env['product.template']
result = template_obj._get_act_window_dict(
'procurement_plan.action_procurement_plan')
result['domain'] = "[('id', '=', " + str(self.plan.id) + ")]"
result['res_id'] = self.plan.id
result['view_mode'] = 'form'
result['views'] = []
self.plan.write({'procurement_ids': [[3, self.id]]})
return result
@api.multi
def button_run(self, autocommit=False):
for procurement in self:
procurement.with_context(plan=procurement.plan.id).run(
autocommit=autocommit)
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def button_check(self, autocommit=False):
for procurement in self:
procurement.with_context(plan=procurement.plan.id).check(
autocommit=autocommit)
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def cancel(self):
super(ProcurementOrder, self).cancel()
for procurement in self:
if procurement.plan:
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def reset_to_confirmed(self):
super(ProcurementOrder, self).reset_to_confirmed()
for procurement in self:
if procurement.plan:
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def _change_date_planned_from_plan_for_po(self, days_to_sum):
for proc in self:
new_date = (fields.Datetime.from_string(proc.date_planned) +
(relativedelta(days=days_to_sum)))
proc.write({'date_planned': new_date})
if (proc.purchase_line_id and
proc.purchase_line_id.order_id.state == 'draft'):
proc.purchase_line_id.write({'date_planned': new_date})
|
odoomrp/odoomrp-wip
|
procurement_plan/models/procurement.py
|
procurement.py
|
py
| 5,931 |
python
|
en
|
code
| 119 |
github-code
|
6
|
[
{
"api_name": "openerp.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "openerp.api.multi",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Many2one",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Selection",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Boolean",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "openerp.api.model",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "openerp.api.multi",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "openerp.api.multi",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "openerp.api.multi",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "openerp.api.multi",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "openerp.api.multi",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Datetime.from_string",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "openerp.fields.Datetime",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "openerp.fields",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "openerp.api.multi",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 155,
"usage_type": "name"
}
] |
29446328549
|
# -*- coding: utf-8 -*-
import sys
import cv2
import mediapipe as mp
import re
import time
import threading
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from selenium import webdriver
from lib.handsign.gesture import define_gesture, find_gesture, handedness
from lib.sr.SR_edsr import sr_work
from socket import *
## ==> SPLASH SCREEN
from lib.ui.ui_splash_screen import Ui_SplashScreen
## ==> MAIN WINDOW
from lib.ui.ui_main import Ui_MainWindow
# Create Socket
clientSock = socket(AF_INET, SOCK_STREAM)
url = '192.168.43.145'
clientSock.connect((url, 2000))
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
## ==> GLOBALS
counter = 0
hands = None
cap_hand = None
cap_situ = None
right_prev = None
left_prev = None
left_count = 0
#Camera Command
camera_left = 0
camera_right = 0
camera_center = 0
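# These flags track which camera command was sent last, so the same command is not
# re-sent on every processed frame.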
# YOUR APPLICATION
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.logic_btn = False
# self.logic_dr = False
self.case = 0
# Run this handler when the button is clicked
self.ui.pushButton.clicked.connect(self.btnClicked)
# self.ui.pushButton_2.clicked.connect(self.drClicked)
# set warning
self.ui.warning.setVisible(False)
# self.ui.warning.setVisible(False)
# set wait
self.ui.wait.setVisible(False)
def start(self):
global cap_hand
global cap_situ
global hands
global right_prev
global left_prev
global left_count
global camera_center
global camera_left
global camera_right
turn_on_esp = 0
while cap_hand.isOpened():
success, image = cap_hand.read()
success2, image2 = cap_situ.read()
if not success:
break
if not success2:
break
if success:
if turn_on_esp == 0:
esp_trd = threading.Thread(target=esp32_video, name="[Daemon2]", args=())
esp_trd.setDaemon(True)
esp_trd.start()
turn_on_esp += 1
# Resize Image
image = cv2.resize(image, dsize=(800, 600))
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
landmark = []
landmark_list = []
cnt = 0
cnt2 = 0
# Count number of loop when left hand gesture is not used
left_count += 1
# Interpret Hand Gesture & Control RC Car
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
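# Roughly: walk the text form of the landmark protobuf, keep the numeric tokens and
# group them into coordinate triples collected in landmark_list.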
for i in str(hand_landmarks).split():
is_num = bool(re.findall(r'\d+', i))
# Extract landmarks
if is_num is True:
if cnt < 3 and cnt2 == 0:
landmark.append(float(i))
cnt += 1
elif cnt == 3 and cnt2 == 0:
cnt2 = 1
elif cnt == 3 and cnt2 == 1:
cnt = 0
cnt2 = 0
if len(landmark) == 3:
landmark_list.append(landmark)
landmark = []
# Right Hand Gesture Controls
if find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1])) != "None" and\
handedness(landmark_list[0], landmark_list[1]) == 'right':
cmd = find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1]))
if right_prev != cmd:
right_prev = cmd
# Create Thread
t = threading.Thread(target=url_command_right, name="[Daemon]", args=(cmd,))
t.setDaemon(True)
t.start()
# Left Hand Gesture Controls
if find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1])) != "None" and\
handedness(landmark_list[0], landmark_list[1]) == 'left':
cmd = find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1]))
# Camera Command
if cmd == "Camera_LEFT" or cmd == "Camera_RIGHT" or cmd == "Camera_CENTER":
if cmd == "Camera_LEFT" and camera_left == 0:
left_prev = cmd
left_count = 0
camera_left = 1
camera_right = 0
camera_center = 0
# Create Thread
t = threading.Thread(target=url_command_left, name="[Daemon5]", args=(cmd,))
t.setDaemon(True)
t.start()
elif cmd == "Camera_RIGHT" and camera_right == 0:
left_prev = cmd
left_count = 0
camera_left = 0
camera_right = 1
camera_center = 0
# Create Thread
t = threading.Thread(target=url_command_left, name="[Daemon6]", args=(cmd,))
t.setDaemon(True)
t.start()
elif cmd == "Camera_CENTER" and camera_center == 0:
left_prev = cmd
left_count = 0
camera_left = 0
camera_right = 0
camera_center = 1
# Create Thread
t = threading.Thread(target=url_command_left, name="[Daemon7]", args=(cmd,))
t.setDaemon(True)
t.start()
if cmd == "Capture" and left_count > 3:
left_prev = cmd
left_count = 0
img_name = 'image/input.png'
cv2.imwrite(img_name, image2)
# SR Command
if left_prev != cmd and cmd not in ("Camera_LEFT", "Camera_RIGHT", "Capture"):
left_prev = cmd
if cmd == "Work SR Engine":
t = threading.Thread(target=sr_work, name="[Daemon4]", args=())
t.setDaemon(True)
t.start()
self.ui.wait.setVisible(True)
if cmd == "SR Done":
self.ui.wait.setVisible(False)
print(find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1])))
print(handedness(landmark_list[0], landmark_list[1]))
self.ui.cmd.setText(f"{find_gesture(define_gesture(landmark_list), handedness(landmark_list[0], landmark_list[1]))}\n"
f"{handedness(landmark_list[0], landmark_list[1])}")
self.ui.cmd.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.ui.cmd.repaint()
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
self.displayHandSign(image)
self.displayCCTV(image2)
#self.displayRCCAR(image2)
self.displayCaptureImg()
self.displaySRImg()
#Keyboard
k = cv2.waitKey(0)
if k % 256 == 27:
# esc pressed --> break
break
elif k % 256 == 32:
# space pressed --> capture
img_name = '../../image/input.png'
cv2.imwrite(img_name, image)
hands.close()
cap_hand.release()
cap_situ.release()
cv2.destroyAllWindows()
def btnClicked(self):
if self.logic_btn == True:
self.logic_btn = False
# self.ui.rccarCam.setPixmap(None)
self.case += 1
self.ui.lcdNumber.display(self.case)
self.ui.warning.setVisible(False)
# self.ui.wait.setVisible(False)
# space pressed --> capture
else:
self.logic_btn = True
self.ui.warning.setVisible(True)
# self.ui.wait.setVisible(True)
def displayHandSign(self, img):
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(img, img.shape[1], img.shape[0], qformat)
img = img.rgbSwapped()
w = self.ui.handSign.width()
h = self.ui.handSign.height()
self.ui.handSign.setPixmap(QPixmap.fromImage(img).scaled(w, h, Qt.KeepAspectRatioByExpanding))
# self.ui.handSign.setPixmap(QPixmap.fromImage(img))
# Center alignment
self.ui.handSign.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displayRCCAR(self, img):
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(img, img.shape[1], img.shape[0], qformat)
img = img.rgbSwapped()
w = self.ui.handSign.width()
h = self.ui.handSign.height()
self.ui.cctv.setPixmap(QPixmap.fromImage(img).scaled(w, h, Qt.KeepAspectRatioByExpanding))
# self.ui.cctv.setPixmap(QPixmap.fromImage(img))
# Center alignment
self.ui.cctv.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
# self.ui.situation2.setPixmap(QPixmap.fromImage(img))
# # Center alignment
# self.ui.situation2.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displayCCTV(self, img):
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(img, img.shape[1], img.shape[0], qformat)
img = img.rgbSwapped()
w = self.ui.handSign.width()
h = self.ui.handSign.height()
self.ui.rccarCam.setPixmap(QPixmap.fromImage(img).scaled(w, h, Qt.KeepAspectRatioByExpanding))
# self.ui.rccarCam.setPixmap(QPixmap.fromImage(img))
# Center alignment
self.ui.rccarCam.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displayCaptureImg(self):
img = QPixmap('../../image/input.png')
w = self.ui.cap_img.width()
h = self.ui.cap_img.height()
self.ui.cap_img.setPixmap(img.scaled(w, h, Qt.KeepAspectRatioByExpanding))
# Center alignment
self.ui.cap_img.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displaySRImg(self):
img = QPixmap('../../image/upscaled.png')
w = self.ui.sr_img.width()
h = self.ui.sr_img.height()
self.ui.sr_img.setPixmap(img.scaled(w, h, Qt.KeepAspectRatioByExpanding))
# Center alignment
self.ui.sr_img.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
# SPLASH SCREEN
class SplashScreen(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_SplashScreen()
self.ui.setupUi(self)
## REMOVE TITLE BAR
self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
## DROP SHADOW EFFECT
self.shadow = QGraphicsDropShadowEffect(self)
self.shadow.setBlurRadius(20)
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.shadow.setColor(QColor(0, 0, 0, 60))
self.ui.dropShadowFrame.setGraphicsEffect(self.shadow)
## QTIMER ==> START
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.progress)
# TIMER IN MILLISECONDS
self.timer.start(35)
# # Change Texts
# QtCore.QTimer.singleShot(1500, lambda: self.ui.label_description.setText("<strong>LOADING</strong> DATABASE"))
# QtCore.QTimer.singleShot(3000, lambda: self.ui.label_description.setText("<strong>LOADING</strong> USER INTERFACE"))
## SHOW ==> MAIN WINDOW
self.show()
## ==> APP FUNCTIONS
def progress(self):
global counter
global hands
global cap_hand
global cap_situ
# SET VALUE TO PROGRESS BAR
self.ui.progressBar.setValue(counter)
if hands is None:
self.ui.label_loading.setText("load mediapipe...")
self.ui.label_loading.repaint()
hands = mp_hands.Hands(
min_detection_confidence=0.7, min_tracking_confidence=0.5)
cap_hand = cv2.VideoCapture(0)
cap_situ = cv2.VideoCapture(1)
counter = 20
self.ui.label_loading.setText("loading...")
# CLOSE SPLASH SCREEN AND OPEN APP
if counter > 100:
# STOP TIMER
self.timer.stop()
# SHOW MAIN WINDOW
self.main = MainWindow()
self.main.show()
# CLOSE SPLASH SCREEN
self.close()
# START MAIN SCREEN
self.main.start()
# INCREASE COUNTER
counter += 4
def url_command_right(cmd):
try:
clientSock.send(cmd.encode('utf-8'))
except:
print("\n\n\n\nException occurred\n\n\n\n")
def url_command_left(cmd):
try:
clientSock.send(cmd.encode('utf-8'))
time.sleep(10)
except:
print("\n\n\n\nException occurred\n\n\n\n")
def esp32_video():
# change to your ESP32-CAM ip
wd = webdriver.Chrome(r'C:\Users\jji44\Desktop\chromedriver.exe')
url = 'http://192.168.43.159:81/stream'
wd.set_window_size(400, 400)
#wd.set
wd.get(url)
# url = "http://192.168.0.152:81/stream"
# CAMERA_BUFFRER_SIZE = 4096#4096
# stream = urlopen(url)
# bts = b''
#
# while True:
# try:
# bts += stream.read(CAMERA_BUFFRER_SIZE)
# jpghead = bts.find(b'\xff\xd8')
# jpgend = bts.find(b'\xff\xd9')
# if jpghead > -1 and jpgend > -1:
# jpg = bts[jpghead:jpgend + 2]
# bts = bts[jpgend + 2:]
# image3 = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
# image3 = cv2.resize(image3, (640, 480))
# MainWindow.displayRCCAR(window.main, image3)
# except Exception as e:
# print("Error:" + str(e))
# bts = b''
# stream = urlopen(url)
# continue
if __name__ == "__main__":
app = QApplication(sys.argv)
window = SplashScreen()
try:
sys.exit(app.exec_())
except:
print('exiting')
|
cheeseBG/EmergencyResponseSystem
|
main.py
|
main.py
|
py
| 17,061 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "mediapipe.solutions",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "lib.ui.ui_main.Ui_MainWindow",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.find_gesture",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.define_gesture",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.find_gesture",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.define_gesture",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.find_gesture",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.define_gesture",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.find_gesture",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.define_gesture",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "lib.sr.SR_edsr.sr_work",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "lib.handsign.gesture.find_gesture",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.define_gesture",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.find_gesture",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.define_gesture",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "lib.handsign.gesture.handedness",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "cv2.waitKey",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "lib.ui.ui_splash_screen.Ui_SplashScreen",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QTimer",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 469,
"usage_type": "call"
}
] |
33869960923
|
import fasttext
import pickle
model = fasttext.load_model('/data/disk1/private/yx/model200v2_8.bin', encoding='utf-8')
(wordnum,vec_size) = (len(model.words),model.dim)
word2id = {}
vecList = []
for idx,word in enumerate(model.words):
word2id[word] = idx
vecList.append(model[word])
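# word2id.pkl holds two pickled objects in sequence: the (wordnum, vec_size) tuple,
# then the word -> index mapping; the vectors themselves go to vec_nor.npy below.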
with open("/data/disk1/private/yx/word2id.pkl","wb") as f:
pickle.dump((wordnum,vec_size),f)
pickle.dump(word2id,f)
import numpy as np
vecnp = np.asarray(vecList)
print(vecnp.shape)
np.save("/data/disk1/private/yx/vec_nor.npy",vecnp)
|
xcjthu/TopTextClassification
|
utils/powerlawtools/fastmodeltrans.py
|
fastmodeltrans.py
|
py
| 533 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "fasttext.load_model",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 16,
"usage_type": "call"
}
] |
23213929420
|
"""
IMU 6-DOF
Acceleration
- imu_accel_x
- imu_accel_y
- imu_accel_z
Angular speed
- imu_gyro_x
- imu_gyro_y
- imu_gyro_z
"""
import numpy as np
from numpy.linalg import inv
from scipy.spatial.transform import Rotation as rot
"""
X: states:
- pitch
- roll
- yaw (not used)
- bias angular rate pitch
- bias angular rate roll
- bias angular rate yaw
Note: In order to compute yaw, an additional sensor like a magnetometer is required.
u: inputs
- Euler angles
"""
class INS_filter:
def __init__(self, data):
dt = 1e-2
self.X = np.zeros([6,1]) # error in Euler angles, gyro biases
self.X[0] = -np.arctan2(data["imu_accel_y"], np.sqrt(data["imu_accel_y"]**2+data["imu_accel_z"]**2))
self.X[1] = np.arctan2(data["imu_accel_x"], np.sqrt(data["imu_accel_x"]**2+data["imu_accel_z"]**2))
self.Cnb = rot.from_euler("xyz", self.X[0:3].transpose()).as_matrix()[0]
self.P = np.identity(6)
# Process model
self.F = np.identity(6)
self.F[0:3,3:6] = -dt*self.Cnb
# Control action model
self.B = np.zeros([6,3])
self.B[0:3, 0:3] = np.identity(3)*dt
# Observation matrix
self.H = np.zeros([3,6])
self.H[0:3, 0:3] = np.identity(3)
# Process noise matrix
self.gyro_psd = 3.5e-4
self.gyro_bias_psd = 1e-7
self.Q = np.zeros([6,6])
self.updateQ(dt)
# Sensor noise matrix (accel)
self.R = np.zeros([3,3])
self.R[0][0] = 5
self.R[1][1] = 5
self.R[2][2] = 5
def updateQ(self, dt):
self.Q[0:3, 0:3] = np.identity(3)*self.gyro_psd*dt
self.Q[3:6, 3:6] = np.identity(3) * self.gyro_bias_psd * dt
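# For reference, the standard discrete Kalman filter equations implemented below:
#   predict: X = F X + B u,               P = F P F' + Q
#   update:  y = z - H X,  S = H P H' + R,  K = P H' inv(S)
#            X = X + K y,                 P = (I - K H) P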
def predict(self, w, dt): # w is the angular rate vector
self.Cnb = rot.from_euler("xyz", self.X[0:3].transpose()).as_matrix()[0]
u = w.transpose()
self.updateQ(dt)
#update dt
self.F[0:3,3:6] = -dt*self.Cnb
self.B[0:3, 0:3] = dt*self.Cnb
# build pseudo control var u
self.X = self.F@self.X + self.B@u
self.P = self.F@self.P@self.F.transpose() + self.Q
def updateAttitude(self, a_bib):
z = self.getEulerAnglesFromAccel(a_bib.transpose())
y = z - self.H@self.X
S = self.H@self.P@self.H.transpose() + self.R
K = (self.P@self.H.transpose())@inv(S)
self.X = self.X+K@y
I = np.identity(6)
self.P = (I - K@self.H)@self.P
def getEulerAnglesFromAccel(self, a_bib):
eul_nb = np.zeros([3,1])
eul_nb[0] = -np.arctan2(a_bib[1], np.sqrt(a_bib[1]**2+a_bib[2]**2))
eul_nb[1] = np.arctan2(a_bib[0], np.sqrt(a_bib[0]**2+a_bib[2]**2))
return eul_nb
def get_states(self):
return {"roll": np.asscalar(self.X[0])*180/np.pi,
"pitch": np.asscalar(self.X[1])*180/np.pi,
"yaw": np.asscalar(self.X[2])*180/np.pi,
"gyro_bias_roll": np.asscalar(self.X[3])*180/np.pi,
"gyro_bias_pitch": np.asscalar(self.X[4])*180/np.pi}
def run_filter_simulation(df):
import time
start = time.time()
init = False
kf_ins_res = {"roll": [], "pitch":[], "yaw":[], "gyro_bias_roll":[], "gyro_bias_pitch":[]}
last_time = 0
for index, row in df.iterrows():
if not init:
ins_kf = INS_filter(row)
init = True
last_time = row["time"] - 1e-2
# Note: in a real-time system, the prediction step should run at each iteration
# This hack is only used here for simulation purposes
if row["imu_new_data"]:
dt = row["time"] - last_time
ins_kf.predict(np.matrix([row["imu_gyro_x"], row["imu_gyro_y"], row["imu_gyro_z"]]), dt)
last_time = row["time"]
if row["imu_new_data"]:
ins_kf.updateAttitude(np.matrix([row["imu_accel_x"], row["imu_accel_y"], row["imu_accel_z"]]))
res = ins_kf.get_states()
kf_ins_res["roll"].append(res["roll"])
kf_ins_res["pitch"].append(res["pitch"])
kf_ins_res["yaw"].append(res["yaw"])
kf_ins_res["gyro_bias_roll"].append(res["gyro_bias_roll"])
kf_ins_res["gyro_bias_pitch"].append(res["gyro_bias_pitch"])
end = time.time()
print(f"Execution time: {end - start} s")
import matplotlib.pyplot as plt
f, ax = plt.subplots(4, 1)
ax[0].set_title("Roll")
ax[0].plot(df["time"], kf_ins_res["roll"], label="roll")
ax[1].set_title("Pitch")
ax[1].plot(df["time"], kf_ins_res["pitch"], label="pitch")
ax[2].set_title("Gyro bias roll")
ax[2].plot(df["time"], kf_ins_res["gyro_bias_roll"], label="gyro_bias_roll")
ax[3].set_title("Gyro bias pitch")
ax[3].plot(df["time"], kf_ins_res["gyro_bias_pitch"], label="gyro_bias_pitch")
plt.subplots_adjust(hspace=0.4)
f.canvas.set_window_title('Kalman Filter INS')
f.suptitle("Kalman Filter INS")
# f.legend()
plt.show()
if __name__ == "__main__":
import pandas as pd
data = pd.read_csv("gns_ins_data2.csv")
run_filter_simulation(data)
|
toshiharutf/Kalman_Filter_GNS_INS
|
ins_filter_full_state_demo.py
|
ins_filter_full_state_demo.py
|
py
| 5,133 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.transform.Rotation.from_euler",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.transform.Rotation",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "numpy.identity",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.transform.Rotation.from_euler",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.transform.Rotation",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.asscalar",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "numpy.asscalar",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "numpy.asscalar",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "numpy.asscalar",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "numpy.asscalar",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 176,
"usage_type": "call"
}
] |
10211319525
|
# -*- coding: utf8 -*-
from django.test import TestCase
from django.apps import apps
from blog.models import ExecuteStatus, Tag
from blog.models import TestCase as TC
from django.contrib.auth.models import User
import datetime
import os
class TestCaseModelTestCase(TestCase):
def setUp(self):
#apps.get_app_config()
#user = User.objects.create_superuser()
from django.utils import timezone
created_time = timezone.now()
tags = Tag.objects.order_by('?')
tag1 = tags.first()
tag2 = tags.last()
status = ExecuteStatus.objects.create(name='Testing')
#user = User.objects.get_by_natural_key('admin')
user = User.objects.create_superuser(
username='admin1',
email='[email protected]',
password='admin')
self.testcase = TC.objects.create(
name='1234',
created_time=created_time,
abstract='This is the',
execute_status=status,
author=user,
)
#testcase.tags.add(tag1, tag2)
#testcase.save()
def test_str_representation(self):
self.assertEqual(self.testcase.__str__(), self.testcase.name)
|
charleszh/rf-web
|
DjangoDemo/blog/tests/test_models.py
|
test_models.py
|
py
| 1,209 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "blog.models.Tag.objects.order_by",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "blog.models.Tag.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Tag",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "blog.models.ExecuteStatus.objects.create",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "blog.models.ExecuteStatus.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "blog.models.ExecuteStatus",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_superuser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "blog.models.TestCase.objects.create",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "blog.models.TestCase.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "blog.models.TestCase",
"line_number": 26,
"usage_type": "name"
}
] |
27646567910
|
import http.server
from colorama import Fore, Style
import os
import cgi
HOST_NAME = '127.0.0.1' # Kali IP address
PORT_NUMBER = 80 # Listening port number
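# Rough flow: the target polls with GET to receive the next command typed by the
# operator, then POSTs the command output back so it can be printed on this side.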
class MyHandler(http.server.BaseHTTPRequestHandler): # MyHandler defines what we should do from the client / target
def do_GET(s):
# If we got a GET request, we will:-
s.send_response(200,message=None) # return HTML status 200 (OK)
s.send_header("Content-type", "text/html") # Inform the target of the content type header
s.end_headers()
cmd = input(f"{Fore.LIGHTCYAN_EX}(Abuqasem)>{Style.RESET_ALL} ") # take user input
s.wfile.write(cmd.encode("utf-8")) # send the command which we got from the user input
def do_POST(s):
# If we got a POST, we will:-
s.send_response(200) # return HTML status 200 (OK)
s.end_headers()
length = int(s.headers['Content-Length']) # Define the length which means how many bytes
# value has to be integer
postVar = s.rfile.read(length) # Read then print the posted data
print(postVar.strip().decode("utf-8"), end="")
def getfile(s):
if s.path == '/store':
try:
ctype, pdict = cgi.parse_header(s.headers.get('content-type'))
if ctype == 'multipart/form-data':
fs = cgi.FieldStorage(fp=s.rfile,headers=s.headers,environ={'REQUEST_METHOD': 'POST'})
else:
print("[-] Unexpected POST request")
fs_up = fs['file']
with open('/proof.txt', 'wb') as o:
o.write(fs_up.file.read())
s.send_response(200)
s.end_headers()
except Exception as e:
print (e)
return
if __name__ == '__main__':
# We start a server_class and create an httpd object, passing our Kali IP, port number and handler class
server_class = http.server.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
try:
print(f"{Fore.LIGHTGREEN_EX}(Listening on port)->[{PORT_NUMBER}]{Style.RESET_ALL}")
httpd.serve_forever() # start the HTTP server; on Ctrl+C we interrupt and close it below
except KeyboardInterrupt:
print(f"{Fore.RED}[!] Server is terminated{Style.RESET_ALL}")
httpd.server_close()
|
zAbuQasem/Misc
|
http reverse shell/Server.py
|
Server.py
|
py
| 2,367 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "http.server.server",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.LIGHTCYAN_EX",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "cgi.parse_header",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cgi.FieldStorage",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "http.server.server",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.LIGHTGREEN_EX",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "colorama.Style",
"line_number": 53,
"usage_type": "name"
}
] |
13767499463
|
import os
import pytest
from stips import stips_data_base
# from stips.utilities import SelectParameter
# from stips.utilities.utilities import GetParameter
@pytest.fixture(autouse=True)
def pre_post_test():
# Setup config file environment variable
config_param = None
if "stips_config" in os.environ:
config_param = os.environ["stips_config"]
del os.environ["stips_config"]
# Setup stips_data_base by renaming any possible file
if os.path.exists(os.path.join(stips_data_base, "stips_config.yaml")):
os.rename(os.path.join(stips_data_base, "stips_config.yaml"),
os.path.join(stips_data_base, "stips_config_notused.yaml"))
# this is where the test function runs
yield
# Teardown config file environment variable
if config_param is not None:
os.environ["stips_config"] = config_param
# Teardown stips_data_base config file
if os.path.exists(os.path.join(stips_data_base, "stips_config_notused.yaml")):
os.rename(os.path.join(stips_data_base, "stips_config_notused.yaml"),
os.path.join(stips_data_base, "stips_config.yaml"))
def test_local_file(data_base):
config_file = os.path.join(data_base, "override_config.yaml")
with open(config_file, "w") as conf:
conf.write("observation_distortion_enable : true")
if os.path.exists(config_file):
os.remove(config_file)
def test_environment_variable(data_base):
config_file = os.path.join(data_base, "override_config.yaml")
with open(config_file, "w") as conf:
conf.write("observation_distortion_enable : true")
os.environ['stips_config'] = config_file
if os.path.exists(config_file):
os.remove(config_file)
if 'stips_config' in os.environ:
del os.environ['stips_config']
def test_data_variable(data_base):
config_file = os.path.join(stips_data_base, "stips_config.yaml")
with open(config_file, "w") as conf:
conf.write("observation_distortion_enable : true")
if os.path.exists(config_file):
os.remove(config_file)
|
spacetelescope/STScI-STIPS
|
stips/utilities/tests/test_config.py
|
test_config.py
|
py
| 2,091 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "os.rename",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "os.rename",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "stips.stips_data_base",
"line_number": 58,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 63,
"usage_type": "call"
}
] |
28300388553
|
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Load the datasets
regular_season_results = pd.read_csv('MRegularSeasonDetailedResults.csv')
tournament_results = pd.read_csv('MNCAATourneyDetailedResults.csv')
# Merge regular season and tournament results
all_game_results = pd.concat([regular_season_results, tournament_results], ignore_index=True)
# Feature engineering and dataset preparation
all_game_results['point_diff'] = all_game_results['WScore'] - all_game_results['LScore']
all_game_results['team1_shooting_percentage'] = all_game_results['WFGM'] / all_game_results['WFGA']
all_game_results['team2_shooting_percentage'] = all_game_results['LFGM'] / all_game_results['LFGA']
all_game_results['rebounds_diff'] = all_game_results['WOR'] + all_game_results['WDR'] - (all_game_results['LOR'] + all_game_results['LDR'])
all_game_results['turnovers_diff'] = all_game_results['WTO'] - all_game_results['LTO']
X = all_game_results[['point_diff', 'team1_shooting_percentage', 'team2_shooting_percentage', 'rebounds_diff', 'turnovers_diff']]
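# Label: 1 when the team with the smaller TeamID won the game, 0 otherwise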
y = (all_game_results['WTeamID'] < all_game_results['LTeamID']).astype(int)
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Train a Gradient Boosting Classifier
model = GradientBoostingClassifier(random_state=42)
model.fit(X_train, y_train)
# Evaluate the model
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'Model accuracy: {accuracy:.2f}')
def predict_winner(team1_id, team2_id, input_data, model):
prediction = model.predict(input_data)
return team1_id if prediction[0] == 1 else team2_id
def calculate_team_average_stats(team_id, all_game_results):
team_games = all_game_results[(all_game_results['WTeamID'] == team_id) | (all_game_results['LTeamID'] == team_id)]
team_stats = {
'point_diff': [],
'team_shooting_percentage': [],
'rebounds_diff': [],
'turnovers_diff': []
}
for index, row in team_games.iterrows():
if row['WTeamID'] == team_id:
team_stats['point_diff'].append(row['WScore'] - row['LScore'])
team_stats['team_shooting_percentage'].append(row['WFGM'] / row['WFGA'])
team_stats['rebounds_diff'].append(row['WOR'] + row['WDR'] - (row['LOR'] + row['LDR']))
team_stats['turnovers_diff'].append(row['WTO'] - row['LTO'])
else:
team_stats['point_diff'].append(row['LScore'] - row['WScore'])
team_stats['team_shooting_percentage'].append(row['LFGM'] / row['LFGA'])
team_stats['rebounds_diff'].append(row['LOR'] + row['LDR'] - (row['WOR'] + row['WDR']))
team_stats['turnovers_diff'].append(row['LTO'] - row['WTO'])
average_stats = {
key: sum(values) / len(values)
for key, values in team_stats.items()
}
return average_stats
def predict_game(team1_id, team2_id, model, all_game_results):
team1_average_stats = calculate_team_average_stats(team1_id, all_game_results)
team2_average_stats = calculate_team_average_stats(team2_id, all_game_results)
input_data = pd.DataFrame([{
'point_diff': team1_average_stats['point_diff'] - team2_average_stats['point_diff'],
'team1_shooting_percentage': team1_average_stats['team_shooting_percentage'],
'team2_shooting_percentage': team2_average_stats['team_shooting_percentage'],
'rebounds_diff': team1_average_stats['rebounds_diff'] - team2_average_stats['rebounds_diff'],
'turnovers_diff': team1_average_stats['turnovers_diff'] - team2_average_stats['turnovers_diff']
}])
winner = predict_winner(team1_id, team2_id, input_data, model)
return winner
# Main loop for user input
while True:
print("Enter the team IDs for the two teams you want to predict (e.g. 1101 1102) or type 'exit' to quit:")
user_input = input()
if user_input.lower() == 'exit':
break
try:
team1_id, team2_id = map(int, user_input.split())
except ValueError:
print("Invalid input. Please enter two team IDs separated by a space.")
continue
winner = predict_game(team1_id, team2_id, model, all_game_results)
print(f'The predicted winner is: {winner}')
|
lakshayMahajan/March-Madness-ML
|
madness.py
|
madness.py
|
py
| 4,404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.GradientBoostingClassifier",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 71,
"usage_type": "call"
}
] |
32312362218
|
from osgeo import gdal
import numpy as np
# calculating SAVI and NDVI
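# NDVI = (NIR - Red) / (NIR + Red)
# SAVI = (1 + L) * (NIR - Red) / (NIR + Red + L), with soil factor L = 0.5 below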
noDataVal = -28672
def calculate_ndvi(nir, red):
valid_mask = (nir != noDataVal) & (red != noDataVal)
ndvi_band = np.where(valid_mask, (nir - red) / (nir + red), np.nan)
return ndvi_band
# Function to calculate SAVI
def calculate_savi(nir, red):
soil_factor = 0.5
valid_mask = (nir != noDataVal) & (red != noDataVal)
savi_band = np.where(valid_mask,((1 + soil_factor) * (nir - red)) / (nir + red + soil_factor),np.nan)
return savi_band
def export_geotiff(src_dataset, band, output_path):
# Get the geotransform from the NIR dataset
geotransform = src_dataset.GetGeoTransform()
# Create the output GeoTIFF
driver = gdal.GetDriverByName('GTiff')
output_dataset = driver.Create(output_path, src_dataset.RasterXSize, src_dataset.RasterYSize, 1, gdal.GDT_Float32)
# Set the geotransform and projection
output_dataset.SetGeoTransform(geotransform)
output_dataset.SetProjection(src_dataset.GetProjection())
# Write the band to the output GeoTIFF
output_band = output_dataset.GetRasterBand(1)
output_band.WriteArray(band)
# Flush data to disk and close the output GeoTIFF
output_band.FlushCache()
output_dataset.FlushCache()
output_dataset = None
def export_savi_ndvi(nir_path, red_path):
savi_output_path = nir_path.replace("nir", "savi")
ndvi_output_path = nir_path.replace("nir", "ndvi")
# Open NIR and red GeoTIFF files
nir_dataset = gdal.Open(nir_path)
red_dataset = gdal.Open(red_path)
# Read NIR and red bands as NumPy arrays
nir_band = nir_dataset.GetRasterBand(1).ReadAsArray()
red_band = red_dataset.GetRasterBand(1).ReadAsArray()
savi_band = calculate_savi(nir_band, red_band)
ndvi_band = calculate_ndvi(nir_band, red_band)
export_geotiff(nir_dataset, savi_band, savi_output_path)
export_geotiff(nir_dataset, ndvi_band, ndvi_output_path)
print('exported', savi_output_path)
print('exported', ndvi_output_path)
# Paths to NIR and red GeoTIFF files
# nir_path = r'C:\Users\dusti\Desktop\GCERlab\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\nir_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# red_path = r'C:\Users\dusti\Desktop\GCERlab\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\red_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# nir_path = r'C:\Users\dnv22\Desktop\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\nir_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# red_path= r'C:\Users\dnv22\Desktop\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\red_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# export_savi_ndvi(nir_path, red_path)
|
dustnvan/ET_goes16
|
goes_export_geotiff/export_savi_ndvi.py
|
export_savi_ndvi.py
|
py
| 2,866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.where",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdal.GetDriverByName",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.GDT_Float32",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdal",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 45,
"usage_type": "name"
}
] |
648697707
|
import numbers
import time
from itertools import product
import numpy as np
import torch
try:
from tqdm import tqdm
except ImportError:
def tqdm(x):
return x
def product1d(inrange):
for ii in inrange:
yield ii
def slice_to_start_stop(s, size):
"""For a single dimension with a given size, normalize slice to size.
Returns slice(None, 0) if slice is invalid."""
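# Illustrative example: slice_to_start_stop(slice(-3, None), 10) -> slice(7, 10)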
if s.step not in (None, 1):
raise ValueError('Nontrivial steps are not supported')
if s.start is None:
start = 0
elif -size <= s.start < 0:
start = size + s.start
elif s.start < -size or s.start >= size:
return slice(None, 0)
else:
start = s.start
if s.stop is None or s.stop > size:
stop = size
elif s.stop < 0:
stop = (size + s.stop)
else:
stop = s.stop
if stop < 1:
return slice(None, 0)
return slice(start, stop)
def int_to_start_stop(i, size):
"""For a single dimension with a given size, turn an int into slice(start, stop)
pair."""
if -size < i < 0:
start = i + size
elif i >= size or i < -size:
raise ValueError('Index ({}) out of range (0-{})'.format(i, size - 1))
else:
start = i
return slice(start, start + 1)
def normalize_slices(slices, shape):
""" Normalize slices to shape.
Normalize input, which can be a slice or a tuple of slices / ellipsis to
be of same length as shape and be in bounds of shape.
Args:
slices (int or slice or ellipsis or tuple[int or slice or ellipsis]): slices to be normalized
Returns:
tuple[slice]: normalized slices (start and stop are both non-None)
tuple[int]: which singleton dimensions should be squeezed out
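Example (illustrative):
normalize_slices((3, slice(None, 5)), (10, 20))
-> ((slice(3, 4), slice(0, 5)), (0,))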
"""
type_msg = 'Advanced selection inappropriate. ' \
'Only numbers, slices (`:`), and ellipsis (`...`) are valid indices (or tuples thereof)'
if isinstance(slices, tuple):
slices_lst = list(slices)
elif isinstance(slices, (numbers.Number, slice, type(Ellipsis))):
slices_lst = [slices]
else:
raise TypeError(type_msg)
ndim = len(shape)
if len([item for item in slices_lst if item != Ellipsis]) > ndim:
raise TypeError("Argument sequence too long")
elif len(slices_lst) < ndim and Ellipsis not in slices_lst:
slices_lst.append(Ellipsis)
normalized = []
found_ellipsis = False
squeeze = []
for item in slices_lst:
d = len(normalized)
if isinstance(item, slice):
normalized.append(slice_to_start_stop(item, shape[d]))
elif isinstance(item, numbers.Number):
squeeze.append(d)
normalized.append(int_to_start_stop(int(item), shape[d]))
elif isinstance(item, type(Ellipsis)):
if found_ellipsis:
raise ValueError("Only one ellipsis may be used")
found_ellipsis = True
while len(normalized) + (len(slices_lst) - d - 1) < ndim:
normalized.append(slice(0, shape[len(normalized)]))
else:
raise TypeError(type_msg)
return tuple(normalized), tuple(squeeze)
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
""" Generator for nd blocking.
Args:
shape (tuple): nd shape
block_shape (tuple): nd block shape
roi (tuple[slice]): region of interest (default: None)
center_blocks_at_roi (bool): if given a roi,
whether to center the blocks being generated
at the roi's origin (default: False)
"""
assert len(shape) == len(block_shape), "Invalid number of dimensions."
if roi is None:
# compute the ranges for the full shape
ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
for sha, bsha in zip(shape, block_shape)]
min_coords = [0] * len(shape)
max_coords = shape
else:
# make sure that the roi is valid
roi, _ = normalize_slices(roi, shape)
ranges = [range(rr.start // bsha,
rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
for rr, bsha in zip(roi, block_shape)]
min_coords = [rr.start for rr in roi]
max_coords = [rr.stop for rr in roi]
need_shift = False
if roi is not None and center_blocks_at_roi:
shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
need_shift = sum(shift) > 0
# product raises memory error for too large ranges,
# because input iterators are cast to tuple
# so far I have only seen this for 1d "open-ended" datasets
# and hence just implemented a workaround for this case,
# but it should be fairly easy to implement an nd version of product
# without casting to tuple for our use case using the imglib loop trick, see also
# https://stackoverflow.com/questions/8695422/why-do-i-get-a-memoryerror-with-itertools-product
try:
start_points = product(*ranges)
except MemoryError:
assert len(ranges) == 1
start_points = product1d(ranges)
for start_point in start_points:
positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
if need_shift:
positions = [pos + sh for pos, sh in zip(positions, shift)]
if any(pos > maxc for pos, maxc in zip(positions, max_coords)):
continue
yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
for pos, bsha, minc, maxc in zip(positions, block_shape,
min_coords, max_coords))
def ensure_5d(tensor):
if tensor.ndim == 3:
tensor = tensor[None, None]
elif tensor.ndim == 4:
tensor = tensor[None]
elif tensor.ndim == 5:
pass
return tensor
# we don't save any output, because this is just for benchmarking purposes
def run_inference(input_dataset, model,
block_shape, halo,
preprocess,
precision):
dtype = torch.float32 if precision == 'single' else torch.float16
device = torch.device('cuda')
model.to(device, dtype=dtype)
model.eval()
shape = input_dataset.shape
full_block_shape = tuple(bs + 2 * ha for bs, ha in zip(block_shape, halo))
local_bb = tuple(slice(ha, bsh - ha)
for bsh, ha in zip(block_shape, halo))
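# Note: each block is grown by the halo on all sides before inference; local_bb later
# crops the halo off the prediction, so only each block's inner region is kept.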
def grow_bounding_box(bb):
grown_bb = tuple(slice(max(b.start - ha, 0), min(sh, b.stop + ha))
for b, ha, sh in zip(bb, halo, shape))
return grown_bb
def ensure_block_shape(input_):
if input_.shape != full_block_shape:
pad_shape = [(0, bsh - sh)
for bsh, sh in zip(full_block_shape, input_.shape)]
input_ = np.pad(input_, pad_shape)
return input_
blocks = list(blocking(shape, block_shape))
per_block_times = []
t_tot = time.time()
with torch.no_grad():
for bb in tqdm(blocks):
bb = grow_bounding_box(bb)
input_ = input_dataset[bb]
input_ = ensure_block_shape(input_)
input_ = preprocess(input_)
input_ = ensure_5d(input_)
t0 = time.time()
input_ = torch.from_numpy(input_).to(device, dtype=dtype)
output = model(input_)
output = output.cpu().to(dtype=torch.float32).numpy()
per_block_times.append(time.time() - t0)
# this is where we would save the output ...
output = output[0]
output = output[(slice(None),) + local_bb]
t_tot = time.time() - t_tot
return t_tot, per_block_times
|
constantinpape/3d-unet-benchmarks
|
bench_util/inference.py
|
inference.py
|
py
| 7,760 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "numbers.Number",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numbers.Number",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "itertools.product",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "torch.float16",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 229,
"usage_type": "call"
}
] |
35093472448
|
import pygame, sys, operator, random, time
from pygame.locals import *
# Global variables
WIDTH = 800
HEIGHT = 500
SUB_SPEED = 3
BUBBLE_MAX_SPEED = 1
TIME_LIMIT = 30
BONUS_SCORE = 1500
BLACK = (0, 0, 0)
BLUE = (12,34,56)
RED = (255,0,0)
WHITE = (255,255,255)
x_sub = 40
y_sub = 250
score = 0
game_end = time.time() + TIME_LIMIT
bonus = 0
# bubbles_id = list()
bubbles_pos = list()
bubbles_speed = list()
bubbles_state = list()
bubbles_size = list()
# Quit the game
def leave_game():
pygame.display.quit()
pygame.quit()
sys.exit()
# Update the screen display
def update_screen ():
screen.blit(background_image, (0,0))
screen.blit(sub, (x_sub, y_sub))
for i in range(len(bubbles_pos) - 1, -1, -1):
if bubbles_state[i] == "Good":
screen.blit(pygame.transform.scale(blue_bubble, (bubbles_size[i], bubbles_size[i])), bubbles_pos[i])
else:
screen.blit(pygame.transform.scale(bad_bubble, (bubbles_size[i], bubbles_size[i])), bubbles_pos[i])
message = "Score : " + str(score)
display_text (message, BLACK, 'Calibri', 20, 10, 15)
# print ("Time : ", int(game_end - time.time()))
message = "Time : " + str(int(game_end - time.time()))
display_text (message, BLACK, 'Calibri', 20, 700, 15)
pygame.display.flip()
# Move the submarine on the scene
def sub_control():
global x_sub, y_sub
key = pygame.key.get_pressed()
if key[pygame.K_RIGHT]:
x_sub += SUB_SPEED
if key[pygame.K_LEFT]:
x_sub -= SUB_SPEED
if key[pygame.K_UP]:
y_sub -= SUB_SPEED
if key[pygame.K_DOWN]:
y_sub += SUB_SPEED
sub_in_scene()
# Check if the sub is still on the visible part of the screen
def sub_in_scene():
global x_sub, y_sub
if x_sub < 0:
x_sub = 0
if y_sub < 0:
y_sub = 0
if x_sub + sub.get_width() > WIDTH:
x_sub = WIDTH - sub.get_width()
if y_sub + sub.get_height() > HEIGHT:
y_sub = HEIGHT - sub.get_height()
# Create many bubbles
def create_bubbles(state) :
x_bubble = WIDTH
y_bubble = random.randint(0, HEIGHT)
if state == "Good":
#bubble = pygame.image.load("Ressources/bulle.png")
        size_bubble = random.randint(blue_bubble.get_width() // 3, blue_bubble.get_width() * 2)
else:
#bubble = pygame.image.load("Ressources/red_bulle.png")
size_bubble = random.randint(bad_bubble.get_width(), bad_bubble.get_width() * 3)
# bubble = pygame.transform.scale (bubble, (size_bubble, size_bubble))
# bubbles_id.append(bubble)
bubbles_pos.append((x_bubble, y_bubble))
bubbles_speed.append(random.randint(1, BUBBLE_MAX_SPEED))
bubbles_state.append(state)
bubbles_size.append(size_bubble)
# Move the bubble on the screen at set speed
def move_bubbles():
for i in range (len(bubbles_pos) - 1, -1, -1) :
bubbles_pos[i] = tuple(map(operator.sub, bubbles_pos[i], (bubbles_speed[i], 0)))
# Update bubble position
def update_game():
global bonus, game_end
if (random.randint(1, 20) == 1):
create_bubbles("Good")
if (random.randint(1, 60) == 1):
create_bubbles("Bad")
collision()
if (int(score / BONUS_SCORE)) > bonus:
bonus += 1
game_end += TIME_LIMIT
move_bubbles()
clean_bubbles()
# Collision between the sub and the bubbles
def collision () :
global score, game_end
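    # Axis-aligned bounding-box test: the submarine and a bubble overlap only when their
    # rectangles intersect on both the x and the y axis (checked below for each bubble).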
for bubble in range(len(bubbles_pos) -1, -1, -1):
if (x_sub < bubbles_pos[bubble][0] + bubbles_size[bubble]
and x_sub + sub.get_width() > bubbles_pos[bubble][0]
and y_sub < bubbles_pos[bubble][1] + bubbles_size[bubble]
and y_sub + sub.get_height() > bubbles_pos[bubble][1]) :
# print ("La bulle ", bubble, "se superpose au sous-marin")
print("etat de la bulle : ", bubbles_state[bubble])
if bubbles_state[bubble] == "Good":
score += bubbles_size[bubble] + bubbles_speed[bubble]
else:
game_end -= 5
# print ("points : ", score)
pop_sound.play(0)
delete_bubble (bubble)
# Delete Bubble when it collides with the submarine
def delete_bubble (bubble):
del bubbles_state[bubble]
del bubbles_speed[bubble]
del bubbles_pos[bubble]
del bubbles_size[bubble]
# del bubbles_id[bubble]
# Remove bubbles who leave the screen
def clean_bubbles ():
for i in range (len(bubbles_pos) - 1, -1, -1) :
if (bubbles_pos[i][0] + bubbles_size[i] < 0) :
delete_bubble(i)
# Display colored text in position X and Y
def display_text(text, color, font, font_size, x, y):
myfont = pygame.font.SysFont(font, font_size, True)
message = myfont.render(text, True, color)
screen.blit(message, (x,y))
# Game Over Screen
def game_over_message():
pygame.mixer.stop()
lose_sound.play(0)
screen.fill(BLUE)
display_text("GAME OVER !", RED, 'Calibri', 40, WIDTH * 0.4, HEIGHT * 0.2 )
message = "Ton Score : " + str(score)
display_text(message, RED, 'Calibri', 40, WIDTH * 0.37, HEIGHT * 0.4 )
display_text("Appuie sur R pour rejouer !", WHITE, 'Calibri', 30, WIDTH * 0.33, HEIGHT * 0.6)
# Initialize game variables when restart
def init_game():
global score, x_sub, y_sub, game_end, bubbles_pos, bubbles_size, bubbles_speed, bubbles_state
game_end = time.time() + TIME_LIMIT
score = 0
x_sub = 40
y_sub = 250
# bubbles_id = list()
bubbles_pos = list()
bubbles_size = list()
bubbles_speed = list()
bubbles_state = list()
# Window Init
pygame.init()
# Display creation
screen = pygame.display.set_mode ((WIDTH, HEIGHT))
# Set the repetition rate of the key
pygame.key.set_repeat(1, 1)
# Window Name
pygame.display.set_caption("Bubble Blaster")
# The Background image
background_image = pygame.image.load("Ressources/ocean.jpg")
# The submarine
sub = pygame.image.load("Ressources/submarine.png")
# The bubble
blue_bubble = pygame.image.load("Ressources/blue_bubble.png")
bad_bubble = pygame.image.load("Ressources/red_bubble.png")
pop_sound = pygame.mixer.Sound("Ressources/collect.wav")
ambient_sound = pygame.mixer.Sound("Ressources/ambient_music.wav")
lose_sound = pygame.mixer.Sound("Ressources/lose.wav")
ambient_sound.set_volume(0.05)
#create_bubble()
# Main loop
while True:
pygame.mixer.stop()
ambient_sound.play(-1)
# Time loop
while time.time() < game_end:
# move_bubble()
update_game()
update_screen()
# Main event loop
for event in pygame.event.get() :
if event.type == pygame.QUIT:
leave_game()
sub_control()
game_over_message()
pygame.display.flip()
restart = False
while not restart:
# Event Manager Loop
for event in pygame.event.get() :
if event.type == pygame.QUIT:
leave_game()
if not hasattr (event, 'key'):
continue
if event.key == K_r:
restart = True
init_game()
## if event.key == K_ESCAPE:
## leave_game()
|
nicoseng/bubble_blaster
|
test.py
|
test.py
|
py
| 7,112 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display.quit",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.transform.scale",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "operator.sub",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.stop",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.set_repeat",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.stop",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "pygame.event.get",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 273,
"usage_type": "attribute"
}
] |
22461213731
|
import xarray as xr
import numpy as np
#This script downloads HYCOM data for the selected periods of the GLBv0.08/expt_53.X experiment
#Important: because of how the OpenDAP servers are structured, the data must be downloaded one time step at a time and concatenated afterwards
#To concatenate, select the desired files and use CDO; this step is therefore best done on a Linux machine.
#Command: cdo cat <*.nc> <saidamodeloteste.nc>
expt = ['http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_56.3',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_57.2',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_57.7',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_92.8',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_92.9',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_93.0',
]
#Input parameters - remember that the coordinates must be given in WGS84 decimal degrees
x = -73.575979
y = 11.552520
prof_ini = 0
prof_max = 1000
#Option to export an area around the point
#number of surrounding cells; 0 extracts only the grid location closest to the point
cell = 2
area = 0 + cell
for ex in expt:
hycom = xr.open_dataset(ex,decode_times=False,decode_cf=False)
if '_9' in ex:
hycom['lon'] = hycom.lon-360
    #extract an area or a single point from HYCOM
if area ==0:
hycom = hycom.sel(lon=x, lat=y,method='nearest')
hycom = hycom.sel(depth = slice(prof_ini,prof_max))
if area >0:
        #distance matrix
dist = ((hycom.lon-x)**2 + (hycom.lat-y)**2)**0.5
        #find the model index whose coordinates are closest to the requested point
ind = np.unravel_index(np.argmin(dist, axis=None), dist.shape)
hycom = hycom.isel(lon=slice(ind[0]-area,ind[0]+area), lat=slice(ind[1]-area,ind[1]+area))
hycom = hycom.sel(depth = slice(prof_ini,prof_max))
    #drop variables that are not needed
hycom = hycom.drop(['tau','surf_el','water_temp_bottom','salinity_bottom','water_u_bottom','water_v_bottom'])
for i in list(range(0,len(hycom.time))):
try:
hyc = hycom.isel(time = i)
hyc = hyc.load()
hyc.to_netcdf('Hycom_Expt{}_{}.nc'.format(ex[-4:],i))
        except Exception:
            # skip time steps that fail to download (e.g. transient OpenDAP errors)
            pass
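# Example output names (illustrative): for the expt_56.3 URL above ex[-4:] == "56.3",
# so the files come out as Hycom_Expt56.3_0.nc, Hycom_Expt56.3_1.nc, ... and can later be
# concatenated on a Linux machine with, e.g.: cdo cat Hycom_Expt56.3_*.nc Hycom_Expt56.3_all.nc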
|
Igoratake/Hycom_Opendap
|
baixa_hycom_2014_frente_Pontual.py
|
baixa_hycom_2014_frente_Pontual.py
|
py
| 2,248 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xarray.open_dataset",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.unravel_index",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 46,
"usage_type": "call"
}
] |
30886261452
|
######### import statements for sample_models.py ###########
from keras import backend as K
from keras.models import Model
from keras.layers import (BatchNormalization, Conv1D, Dense, Input,
TimeDistributed, Activation, Bidirectional, SimpleRNN, GRU, LSTM)
################################
########### import statements for train_utils.py #############
# from data_generator import AudioGenerator  # the code from data_generator.py is pasted into this file, so this import is no longer needed
import _pickle as pickle
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Lambda, BatchNormalization)
from keras.optimizers import SGD, RMSprop
from keras.callbacks import ModelCheckpoint
import os
#####################################################
############ import and variable definitions for data_generator.py #############
import json
import numpy as np
import random
from python_speech_features import mfcc
import librosa
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from utils import calc_feat_dim, spectrogram_from_file, text_to_int_sequence
from utils import conv_output_length
RNG_SEED = 123
######################################################################
##################### all codes of data_generator.py starts here ############################3
class AudioGenerator():
def __init__(self, step=10, window=20, max_freq=8000, mfcc_dim=13,
minibatch_size=20, desc_file=None, spectrogram=True, max_duration=10.0,
sort_by_duration=False):
"""
Params:
step (int): Step size in milliseconds between windows (for spectrogram ONLY)
window (int): FFT window size in milliseconds (for spectrogram ONLY)
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned (for spectrogram ONLY)
desc_file (str, optional): Path to a JSON-line file that contains
labels and paths to the audio files. If this is None, then
load metadata right away
"""
        self.feat_dim = calc_feat_dim(window, max_freq)  # spectrogram feature dimension
self.mfcc_dim = mfcc_dim
self.feats_mean = np.zeros((self.feat_dim,))
self.feats_std = np.ones((self.feat_dim,))
self.rng = random.Random(RNG_SEED)
if desc_file is not None:
self.load_metadata_from_desc_file(desc_file)
self.step = step
self.window = window
self.max_freq = max_freq
self.cur_train_index = 0
self.cur_valid_index = 0
self.cur_test_index = 0
self.max_duration=max_duration
self.minibatch_size = minibatch_size
self.spectrogram = spectrogram
self.sort_by_duration = sort_by_duration
def get_batch(self, partition):
""" Obtain a batch of train, validation, or test data
"""
if partition == 'train':
audio_paths = self.train_audio_paths
cur_index = self.cur_train_index
texts = self.train_texts
elif partition == 'valid':
audio_paths = self.valid_audio_paths
cur_index = self.cur_valid_index
texts = self.valid_texts
elif partition == 'test':
audio_paths = self.test_audio_paths
            cur_index = self.cur_test_index
texts = self.test_texts
else:
raise Exception("Invalid partition. "
"Must be train/validation")
features = [self.normalize(self.featurize(a)) for a in
audio_paths[cur_index:cur_index+self.minibatch_size]]
# calculate necessary sizes
max_length = max([features[i].shape[0]
for i in range(0, self.minibatch_size)])
max_string_length = max([len(texts[cur_index+i])
for i in range(0, self.minibatch_size)])
# initialize the arrays
X_data = np.zeros([self.minibatch_size, max_length,
self.feat_dim*self.spectrogram + self.mfcc_dim*(not self.spectrogram)])
labels = np.ones([self.minibatch_size, max_string_length]) * 28 # blanks
input_length = np.zeros([self.minibatch_size, 1])
label_length = np.zeros([self.minibatch_size, 1])
for i in range(0, self.minibatch_size):
# calculate X_data & input_length
feat = features[i]
input_length[i] = feat.shape[0]
X_data[i, :feat.shape[0], :] = feat
# calculate labels & label_length
label = np.array(text_to_int_sequence(texts[cur_index+i]))
labels[i, :len(label)] = label
label_length[i] = len(label)
# return the arrays
outputs = {'ctc': np.zeros([self.minibatch_size])}
inputs = {'the_input': X_data,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length
}
return (inputs, outputs)
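        # Note (added for clarity): this is the usual Keras CTC training layout --
        # 'the_input' is (batch, max_frames, feature_dim), 'the_labels' is
        # (batch, max_string_length) padded with 28 (used as the blank index here, per the
        # comment above), and the two length arrays carry the unpadded lengths that
        # K.ctc_batch_cost consumes in ctc_lambda_func further below.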
def shuffle_data_by_partition(self, partition):
""" Shuffle the training or validation data
"""
if partition == 'train':
self.train_audio_paths, self.train_durations, self.train_texts = shuffle_data(
self.train_audio_paths, self.train_durations, self.train_texts)
elif partition == 'valid':
self.valid_audio_paths, self.valid_durations, self.valid_texts = shuffle_data(
self.valid_audio_paths, self.valid_durations, self.valid_texts)
else:
raise Exception("Invalid partition. "
"Must be train/validation")
def sort_data_by_duration(self, partition):
""" Sort the training or validation sets by (increasing) duration
"""
if partition == 'train':
self.train_audio_paths, self.train_durations, self.train_texts = sort_data(
self.train_audio_paths, self.train_durations, self.train_texts)
elif partition == 'valid':
self.valid_audio_paths, self.valid_durations, self.valid_texts = sort_data(
self.valid_audio_paths, self.valid_durations, self.valid_texts)
else:
raise Exception("Invalid partition. "
"Must be train/validation")
def next_train(self):
""" Obtain a batch of training data
"""
while True:
ret = self.get_batch('train')
self.cur_train_index += self.minibatch_size
if self.cur_train_index >= len(self.train_texts) - self.minibatch_size:
self.cur_train_index = 0
self.shuffle_data_by_partition('train')
yield ret
def next_valid(self):
""" Obtain a batch of validation data
"""
while True:
ret = self.get_batch('valid')
self.cur_valid_index += self.minibatch_size
if self.cur_valid_index >= len(self.valid_texts) - self.minibatch_size:
self.cur_valid_index = 0
self.shuffle_data_by_partition('valid')
yield ret
def next_test(self):
""" Obtain a batch of test data
"""
while True:
ret = self.get_batch('test')
self.cur_test_index += self.minibatch_size
if self.cur_test_index >= len(self.test_texts) - self.minibatch_size:
self.cur_test_index = 0
yield ret
def load_train_data(self, desc_file='train_corpus.json'):
self.load_metadata_from_desc_file(desc_file, 'train')
self.fit_train()
if self.sort_by_duration:
self.sort_data_by_duration('train')
def load_validation_data(self, desc_file='valid_corpus.json'):
self.load_metadata_from_desc_file(desc_file, 'validation')
if self.sort_by_duration:
self.sort_data_by_duration('valid')
def load_test_data(self, desc_file='test_corpus.json'):
self.load_metadata_from_desc_file(desc_file, 'test')
def load_metadata_from_desc_file(self, desc_file, partition):
""" Read metadata from a JSON-line file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
"""
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > self.max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
print('Error reading line #{}: {}'
.format(line_num, json_line))
if partition == 'train':
self.train_audio_paths = audio_paths
self.train_audio_paths = self.train_audio_paths[:500] # changed
self.train_durations = durations
self.train_durations = self.train_durations[:500] # changed
self.train_texts = texts
self.train_texts = self.train_texts[:500] # changed
elif partition == 'validation':
self.valid_audio_paths = audio_paths
self.valid_audio_paths = self.valid_audio_paths[:50] # changed
self.valid_durations = durations
self.valid_durations = self.valid_durations[:50] # changed
self.valid_texts = texts
self.valid_texts = self.valid_texts[:50] # changed
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
def fit_train(self, k_samples=100):
""" Estimate the mean and std of the features from the training set
Params:
k_samples (int): Use this number of samples for estimation
"""
k_samples = min(k_samples, len(self.train_audio_paths))
samples = self.rng.sample(self.train_audio_paths, k_samples)
feats = [self.featurize(s) for s in samples]
feats = np.vstack(feats)
self.feats_mean = np.mean(feats, axis=0)
self.feats_std = np.std(feats, axis=0)
def featurize(self, audio_clip):
""" For a given audio clip, calculate the corresponding feature
Params:
audio_clip (str): Path to the audio clip
"""
if self.spectrogram:
return spectrogram_from_file(
audio_clip, step=self.step, window=self.window,
max_freq=self.max_freq)
else:
(rate, sig) = wav.read(audio_clip)
return mfcc(sig, rate, numcep=self.mfcc_dim)
def normalize(self, feature, eps=1e-14):
""" Center a feature using the mean and std
Params:
feature (numpy.ndarray): Feature to normalize
"""
return (feature - self.feats_mean) / (self.feats_std + eps)
def shuffle_data(audio_paths, durations, texts):
""" Shuffle the data (called after making a complete pass through
training or validation data during the training process)
Params:
audio_paths (list): Paths to audio clips
durations (list): Durations of utterances for each audio clip
texts (list): Sentences uttered in each audio clip
"""
p = np.random.permutation(len(audio_paths))
audio_paths = [audio_paths[i] for i in p]
durations = [durations[i] for i in p]
texts = [texts[i] for i in p]
return audio_paths, durations, texts
def sort_data(audio_paths, durations, texts):
""" Sort the data by duration
Params:
audio_paths (list): Paths to audio clips
durations (list): Durations of utterances for each audio clip
texts (list): Sentences uttered in each audio clip
"""
p = np.argsort(durations).tolist()
audio_paths = [audio_paths[i] for i in p]
durations = [durations[i] for i in p]
texts = [texts[i] for i in p]
return audio_paths, durations, texts
def vis_train_features(index=0):
""" Visualizing the data point in the training set at the supplied index
"""
# obtain spectrogram
audio_gen = AudioGenerator(spectrogram=True)
audio_gen.load_train_data()
vis_audio_path = audio_gen.train_audio_paths[index]
vis_spectrogram_feature = audio_gen.normalize(audio_gen.featurize(vis_audio_path))
# obtain mfcc
audio_gen = AudioGenerator(spectrogram=False)
audio_gen.load_train_data()
vis_mfcc_feature = audio_gen.normalize(audio_gen.featurize(vis_audio_path))
# obtain text label
vis_text = audio_gen.train_texts[index]
# obtain raw audio
vis_raw_audio, _ = librosa.load(vis_audio_path)
# print total number of training examples
print('There are %d total training examples.' % len(audio_gen.train_audio_paths))
# return labels for plotting
return vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path
def plot_raw_audio(vis_raw_audio):
# plot the raw audio signal
fig = plt.figure(figsize=(12,3))
ax = fig.add_subplot(111)
steps = len(vis_raw_audio)
ax.plot(np.linspace(1, steps, steps), vis_raw_audio)
plt.title('Audio Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.show()
def plot_mfcc_feature(vis_mfcc_feature):
# plot the MFCC feature
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
im = ax.imshow(vis_mfcc_feature, cmap=plt.cm.jet, aspect='auto')
plt.title('Normalized MFCC')
plt.ylabel('Time')
plt.xlabel('MFCC Coefficient')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
ax.set_xticks(np.arange(0, 13, 2), minor=False);
plt.show()
def plot_spectrogram_feature(vis_spectrogram_feature):
# plot the normalized spectrogram
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
im = ax.imshow(vis_spectrogram_feature, cmap=plt.cm.jet, aspect='auto')
plt.title('Normalized Spectrogram')
plt.ylabel('Time')
plt.xlabel('Frequency')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
plt.show()
################################# all codes of data_generator.py ends here ###########################3
# from data_generator import vis_train_features  # the code from data_generator.py is pasted above, so this import is no longer needed
# extract label and audio features for a single training example
vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path = vis_train_features()
# allocate 50% of GPU memory (if you like, feel free to change this)
from keras.backend.tensorflow_backend import set_session
from keras.optimizers import RMSprop, SGD
import tensorflow as tf
"""
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
"""
# watch for any changes in the sample_models module, and reload it automatically
#%load_ext autoreload
#%autoreload 2
# import NN architectures for speech recognition
# from sample_models import *  # the sample_models code is pasted into this file, so this import is not needed
# import function for training acoustic model
# from train_utils import train_model  # the train_utils code is pasted into this file, so this import is not needed
import numpy as np
# from data_generator import AudioGenerator  # the code from data_generator.py is pasted into this file, so this import is no longer needed
from keras import backend as K
from utils import int_sequence_to_text
from IPython.display import Audio
###################### All codes / model defined in sample_models.py start here ################
def simple_rnn_model(input_dim, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(output_dim, return_sequences=True,
implementation=2, name='rnn')(input_data)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(simp_rnn)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def rnn_model(input_dim, units, activation, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = LSTM(units, activation=activation,
return_sequences=True, implementation=2, name='rnn')(input_data)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name='bn_rnn_1d')(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, units, output_dim=29):
""" Build a recurrent + convolutional network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add convolutional layer
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='conv1d')(input_data)
# Add batch normalization
bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
# Add a recurrent layer
simp_rnn = GRU(units, activation='relu',
return_sequences=True, implementation=2, name='rnn')(bn_cnn)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name='bn_rnn_1d')(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
def cnn_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
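# Worked example (added for clarity): with input_length=100, filter_size=11,
# border_mode='valid', stride=2 and dilation=1 the dilated filter is still 11 wide,
# so output_length = 100 - 11 + 1 = 90 and the returned length is (90 + 2 - 1) // 2 = 45.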
def deep_rnn_model(input_dim, units, recur_layers, output_dim=29):
""" Build a deep recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add recurrent layers, each with batch normalization
if recur_layers == 1:
layer = LSTM(units, return_sequences=True, activation='relu')(input_data)
layer = BatchNormalization(name='bt_rnn_1')(layer)
else:
layer = LSTM(units, return_sequences=True, activation='relu')(input_data)
layer = BatchNormalization(name='bt_rnn_1')(layer)
for i in range(recur_layers - 2):
layer = LSTM(units, return_sequences=True, activation='relu')(layer)
layer = BatchNormalization(name='bt_rnn_{}'.format(2+i))(layer)
layer = LSTM(units, return_sequences=True, activation='relu')(layer)
layer = BatchNormalization(name='bt_rnn_last_rnn')(layer)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(layer)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def bidirectional_rnn_model(input_dim, units, output_dim=29):
""" Build a bidirectional recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
bidir_rnn = Bidirectional(LSTM(units, return_sequences=True, activation='relu'), merge_mode='concat')(input_data)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def final_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, units, output_dim=29, dropout_rate=0.5, number_of_layers=2,
cell=GRU, activation='tanh'):
""" Build a deep network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Specify the layers in your network
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='layer_1_conv',
dilation_rate=1)(input_data)
conv_bn = BatchNormalization(name='conv_batch_norm')(conv_1d)
if number_of_layers == 1:
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='rnn_1', dropout=dropout_rate)(conv_bn)
layer = BatchNormalization(name='bt_rnn_1')(layer)
else:
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='rnn_1', dropout=dropout_rate)(conv_bn)
layer = BatchNormalization(name='bt_rnn_1')(layer)
for i in range(number_of_layers - 2):
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='rnn_{}'.format(i+2), dropout=dropout_rate)(layer)
layer = BatchNormalization(name='bt_rnn_{}'.format(i+2))(layer)
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='final_layer_of_rnn')(layer)
layer = BatchNormalization(name='bt_rnn_final')(layer)
time_dense = TimeDistributed(Dense(output_dim))(layer)
# TODO: Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
# TODO: Specify model.output_length
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
##################################### code / model defined in sample_models.py ends here ##############################
########################## all codes of train_utils.py starts here #########################
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
#print("y_pred.shape = " + str(y_pred.shape))
#print("labels.shape = " + str(labels.shape))
#print("input_length.shape = " + str(input_length.shape))
#print("label_length.shape = " + str(label_length.shape))
return K.ctc_batch_cost(labels, y_pred, input_length, label_length) # input_length= seq length of each item in y_pred
# label_length is the seq length of each item in labels
def add_ctc_loss(input_to_softmax):
the_labels = Input(name='the_labels', shape=(None,), dtype='float32')
input_lengths = Input(name='input_length', shape=(1,), dtype='int64')
label_lengths = Input(name='label_length', shape=(1,), dtype='int64')
output_lengths = Lambda(input_to_softmax.output_length)(input_lengths)
# output_length = BatchNormalization()(input_lengths)
# CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
[input_to_softmax.output, the_labels, output_lengths, label_lengths])
model = Model(
inputs=[input_to_softmax.input, the_labels, input_lengths, label_lengths],
outputs=loss_out)
return model
def train_model(input_to_softmax,
pickle_path,
save_model_path,
train_json='train_corpus.json',
valid_json='valid_corpus.json',
minibatch_size=20,
spectrogram=True,
mfcc_dim=13,
optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5),
epochs=20,
verbose=1,
sort_by_duration=False,
max_duration=10.0):
# create a class instance for obtaining batches of data
audio_gen = AudioGenerator(minibatch_size=minibatch_size,
spectrogram=spectrogram, mfcc_dim=mfcc_dim, max_duration=max_duration,
sort_by_duration=sort_by_duration)
# add the training data to the generator
audio_gen.load_train_data(train_json)
audio_gen.load_validation_data(valid_json)
# calculate steps_per_epoch
num_train_examples=len(audio_gen.train_audio_paths)
steps_per_epoch = num_train_examples//minibatch_size
# calculate validation_steps
num_valid_samples = len(audio_gen.valid_audio_paths)
validation_steps = num_valid_samples//minibatch_size
# add CTC loss to the NN specified in input_to_softmax
model = add_ctc_loss(input_to_softmax)
# CTC loss is implemented elsewhere, so use a dummy lambda function for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer)
# make results/ directory, if necessary
if not os.path.exists('results'):
os.makedirs('results')
# add checkpointer
checkpointer = ModelCheckpoint(filepath='results/'+save_model_path, verbose=0)
# train the model
hist = model.fit_generator(generator=audio_gen.next_train(), steps_per_epoch=steps_per_epoch,
epochs=epochs, validation_data=audio_gen.next_valid(), validation_steps=validation_steps,
callbacks=[checkpointer], verbose=verbose)
# save model loss
with open('results/'+pickle_path, 'wb') as f:
pickle.dump(hist.history, f)
################################ all codes of train_utils.py ends here #######################################
"""
model_0 = simple_rnn_model(input_dim=13) # change to 13 if you would like to use MFCC features
"""
"""
train_model(input_to_softmax=model_0,
pickle_path='model_0.pickle',
save_model_path='model_0.h5',
spectrogram=False) # change to False if you would like to use MFCC features
"""
model_end = final_model(input_dim=13,
filters=200,
kernel_size=11,
conv_stride=2,
conv_border_mode='valid',
units=200,
activation='relu',
cell=GRU,
dropout_rate=1,
number_of_layers=2)
train_model(input_to_softmax=model_end,
pickle_path='model_end.pickle',
save_model_path='model_end.h5',
epochs=5,
spectrogram=False)
"""
model_4 = bidirectional_rnn_model(input_dim=13, # change to 13 if you would like to use MFCC features
units=200)
train_model(input_to_softmax=model_4,
pickle_path='model_4.pickle',
save_model_path='model_4.h5',
epochs=5,
optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=2),
spectrogram=False) # change to False if you would like to use MFCC features
"""
def get_predictions(index, partition, input_to_softmax, model_path):
""" Print a model's decoded predictions
Params:
index (int): The example you would like to visualize
partition (str): One of 'train' or 'validation'
input_to_softmax (Model): The acoustic model
model_path (str): Path to saved acoustic model's weights
"""
# load the train and test data
data_gen = AudioGenerator(spectrogram=False)
data_gen.load_train_data()
data_gen.load_validation_data()
# obtain the true transcription and the audio features
if partition == 'validation':
transcr = data_gen.valid_texts[index]
audio_path = data_gen.valid_audio_paths[index]
data_point = data_gen.normalize(data_gen.featurize(audio_path))
elif partition == 'train':
transcr = data_gen.train_texts[index]
audio_path = data_gen.train_audio_paths[index]
data_point = data_gen.normalize(data_gen.featurize(audio_path))
else:
raise Exception('Invalid partition! Must be "train" or "validation"')
# obtain and decode the acoustic model's predictions
input_to_softmax.load_weights(model_path)
prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0))
print("prediction.shape: " + str(prediction.shape))
output_length = [input_to_softmax.output_length(data_point.shape[0])]
pred_ints = (K.eval(K.ctc_decode(
prediction, output_length)[0][0])+1).flatten().tolist()
print("pred_ints: " + str(pred_ints))
print("len(pred_ints): " + str(len(pred_ints)))
# play the audio file, and display the true and predicted transcriptions
print('-'*80)
Audio(audio_path)
print('True transcription:\n' + '\n' + transcr)
print('-'*80)
print('Predicted transcription:\n' + '\n' + ''.join(int_sequence_to_text(pred_ints)))
print('-'*80)
"""
get_predictions(index=2,
partition='validation',
input_to_softmax=model_end,
model_path='results/model_end.h5')
"""
"""
get_predictions(index=1,
partition='validation',
input_to_softmax=model_0,
model_path='results/model_0.h5')
"""
|
MdAbuNafeeIbnaZahid/English-Speech-to-Text-Using-Keras
|
speech-recognition-neural-network/train.py
|
train.py
|
py
| 31,706 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "utils.calc_feat_dim",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "random.Random",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "utils.text_to_int_sequence",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "utils.spectrogram_from_file",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "python_speech_features.mfcc",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "numpy.argsort",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "librosa.load",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.axes_grid1.make_axes_locatable",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 368,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.axes_grid1.make_axes_locatable",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "keras.layers.GRU",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "keras.layers.GRU",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "keras.layers.Bidirectional",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "keras.layers.LSTM",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "keras.layers.GRU",
"line_number": 557,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 588,
"usage_type": "call"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "keras.layers.Activation",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "keras.backend.ctc_batch_cost",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 617,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "keras.layers.Lambda",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "keras.layers.Lambda",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 630,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.SGD",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 670,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 671,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "_pickle.dump",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "keras.layers.GRU",
"line_number": 723,
"usage_type": "name"
},
{
"api_name": "numpy.expand_dims",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "keras.backend.eval",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 780,
"usage_type": "name"
},
{
"api_name": "keras.backend.ctc_decode",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "IPython.display.Audio",
"line_number": 787,
"usage_type": "call"
},
{
"api_name": "utils.int_sequence_to_text",
"line_number": 790,
"usage_type": "call"
}
] |
71971288509
|
from kubeflow.fairing.cloud.docker import get_docker_secret
from kubeflow.fairing.constants import constants
import json
import os
def test_docker_secret_spec():
os.environ["DOCKER_CONFIG"] = "/tmp"
config_dir = os.environ.get('DOCKER_CONFIG')
config_file_name = 'config.json'
config_file = os.path.join(config_dir, config_file_name)
with open(config_file, 'w+') as f:
json.dump({'config': "config"}, f)
docker_secret = get_docker_secret()
assert docker_secret.metadata.name == constants.DOCKER_CREDS_SECRET_NAME
os.remove(config_file)
|
kubeflow/fairing
|
tests/unit/cloud/test_docker.py
|
test_docker.py
|
py
| 578 |
python
|
en
|
code
| 336 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "kubeflow.fairing.cloud.docker.get_docker_secret",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "kubeflow.fairing.constants.constants.DOCKER_CREDS_SECRET_NAME",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "kubeflow.fairing.constants.constants",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 16,
"usage_type": "call"
}
] |
15565393240
|
import logging
import os
from typing import List
from plumbum import cmd, local
from pathlib import Path
import doit
from doit.action import CmdAction
from constants import DEFAULT_DB, DB_USERNAME, DB_PASSWORD, VERBOSITY_DEFAULT
logging.basicConfig()
logger = logging.getLogger("dodo")
logger.setLevel(logging.DEBUG)
NOISEPAGE_PATH = Path.joinpath(Path.home(), "noisepage-pilot").absolute()
ARTIFACTS_PATH = Path.joinpath(NOISEPAGE_PATH, "artifacts/benchbase")
PROJECT_PATH = Path.joinpath(NOISEPAGE_PATH, "artifacts/project")
POSTGRES_PATH = str(Path.joinpath(Path.home(), "postgres/build/bin"))
POSTGRES_DATA_PATH = str(Path.joinpath(Path.home(), "postgresql/data"))
ARTIFACT_benchbase = Path.joinpath(ARTIFACTS_PATH, "benchbase.jar")
ARTIFACT_benchbase_results = ARTIFACT_benchbase / "results"
PSQL = "/home/kramana2/postgres/build/bin/psql"
BENCHBASE_CONFIG_TAGS = {
"scalefactor": "/parameters/scalefactor",
"time": "/parameters/works/work/time",
"rate": "/parameters/works/work/rate",
"terminals": "/parameters/terminals",
}
def task_hello():
return {"actions": ["echo 'Hello world!'"], "verbosity": VERBOSITY_DEFAULT}
def get_config_path(benchmark, config=None) -> str:
"""
Fetches the path to the config file of the given benchmark.
"""
if config is None:
config = PROJECT_PATH / f"{benchmark}_config.xml"
elif not config.startswith("/"):
config = Path(NOISEPAGE_PATH / config).absolute()
return str(config)
def task_update_log_collection():
sql_list = [
"ALTER SYSTEM SET log_destination='csvlog'",
"ALTER SYSTEM SET logging_collector='on'",
"ALTER SYSTEM SET log_statement='all'",
"ALTER SYSTEM SET log_connections='on'",
"ALTER SYSTEM SET log_disconnections='on'",
"ALTER SYSTEM SET log_directory='%(log_directory)s'",
]
return {
"actions": [
f"mkdir -p {POSTGRES_DATA_PATH}/%(log_directory)s",
*[
f'PGPASSWORD={DB_PASSWORD} {PSQL} --host=localhost --dbname={DEFAULT_DB} --username={DB_USERNAME} --command="{sql}"'
for sql in sql_list
],
],
"params": [
{
"name": "log_directory",
"long": "log_directory",
"default": "log",
},
{
"name": "log_file",
"long": "log_file",
"default": "postgresql-%Y-%m-%d_%H%M%S.log",
},
],
"verbosity": VERBOSITY_DEFAULT,
}
def task_perform_vacuum():
"""
Postgres: Performs vacuuming on the database system.
"""
return {
"actions": [
*[
f'PGPASSWORD={DB_PASSWORD} {PSQL} --host=localhost --dbname={DEFAULT_DB} --username={DB_USERNAME} --command="VACUUM;"'
],
],
"params": [],
"verbosity": VERBOSITY_DEFAULT,
}
def task_update_config():
def update_xml(benchmark, scalefactor=1, time=60, rate=10, terminals=1):
kwargs = locals().copy()
del kwargs["benchmark"]
config = get_config_path(benchmark)
logger.info(f"Updating arguments in config file {config} with values: {kwargs}")
        actions = []
        for param, value in kwargs.items():
            # We're assuming that all keys in kwargs are in BENCHBASE_CONFIG_TAGS
            key = BENCHBASE_CONFIG_TAGS[param]
            command = f"xmlstarlet edit --inplace --update '{key}' --value \"{value}\" {config}"
            actions.append(command)
        return "; \n".join(actions)
return {
"actions": [
CmdAction(update_xml),
],
"params": [
{
"name": "benchmark",
"long": "benchmark",
"help": "The benchmark to run.",
"default": "epinions",
},
{
"name": "scalefactor",
"long": "scalefactor",
"default": 1,
},
{
"name": "time",
"long": "time",
"default": 60, # 60s
},
{
"name": "rate",
"long": "rate",
"default": 10,
},
{
"name": "terminals",
"long": "terminals",
"default": 1,
},
],
"verbosity": VERBOSITY_DEFAULT,
}
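# A minimal sketch of what update_xml produces (the benchmark name and values here
# are illustrative): update_xml("epinions", scalefactor=2, time=60, rate=10,
# terminals=1) returns one xmlstarlet command per parameter, e.g.
#   xmlstarlet edit --inplace --update '/parameters/scalefactor' --value "2" <PROJECT_PATH>/epinions_config.xml
# and the commands are joined with "; \n" so doit's CmdAction can run them as a
# single shell string.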
def task_benchbase_workload_create():
"""
Benchbase: initializes the specified benchmark.
"""
def invoke_benchbase(benchmark, config, directory):
config = get_config_path(benchmark, config)
return f"echo {config}; java -jar benchbase.jar -b {benchmark} -c {config} -d {directory} --create=true --load=true"
return {
"actions": [
lambda: os.chdir(str(ARTIFACTS_PATH)),
# Invoke BenchBase.
CmdAction(invoke_benchbase),
# Reset working directory.
lambda: os.chdir(doit.get_initial_workdir()),
],
"file_dep": [ARTIFACT_benchbase],
"uptodate": [False],
"verbosity": VERBOSITY_DEFAULT,
"params": [
{
"name": "benchmark",
"long": "benchmark",
"help": "The benchmark to run.",
"default": "epinions",
},
{
"name": "config",
"long": "config",
"help": (
"The config file to use for BenchBase."
"Defaults to the config in the artifacts folder for the selected benchmark."
),
"default": None,
},
{
"name": "directory",
"long": "directory",
"default": f"{ARTIFACT_benchbase_results}",
},
],
}
def task_benchbase_run():
"""
BenchBase: run a specific benchmark.
"""
def invoke_benchbase(benchmark, config, directory, args):
config = get_config_path(benchmark, config)
return f"echo {config}; java -jar benchbase.jar -b {benchmark} -c {config} -d {directory} {args}"
return {
"actions": [
lambda: os.chdir(str(ARTIFACTS_PATH)),
# Invoke BenchBase.
CmdAction(invoke_benchbase),
# Reset working directory.
lambda: os.chdir(doit.get_initial_workdir()),
],
"file_dep": [ARTIFACT_benchbase],
"uptodate": [False],
"verbosity": VERBOSITY_DEFAULT,
"params": [
{
"name": "benchmark",
"long": "benchmark",
"help": "The benchmark to run.",
"default": "epinions",
},
{
"name": "config",
"long": "config",
"help": (
"The config file to use for BenchBase."
"Defaults to the config in the artifacts folder for the selected benchmark."
),
"default": None,
},
{
"name": "directory",
"long": "directory",
"default": f"{ARTIFACT_benchbase_results}",
},
{
"name": "args",
"long": "args",
"help": "Arguments to pass to BenchBase invocation.",
"default": "--create=false --load=false --execute=false",
},
],
}
|
karthik-ramanathan-3006/15-799-Special-Topics-in-Database-Systems
|
dodos/dodo.py
|
dodo.py
|
py
| 7,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "constants.VERBOSITY_DEFAULT",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "constants.DB_PASSWORD",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "constants.DEFAULT_DB",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "constants.DB_USERNAME",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "constants.VERBOSITY_DEFAULT",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "constants.DB_PASSWORD",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "constants.DEFAULT_DB",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "constants.DB_USERNAME",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "constants.VERBOSITY_DEFAULT",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "plumbum.cmd",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "plumbum.cmd",
"line_number": 113,
"usage_type": "argument"
},
{
"api_name": "doit.action.CmdAction",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "constants.VERBOSITY_DEFAULT",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "os.chdir",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "doit.action.CmdAction",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "doit.get_initial_workdir",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "constants.VERBOSITY_DEFAULT",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "os.chdir",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "doit.action.CmdAction",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "doit.get_initial_workdir",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "constants.VERBOSITY_DEFAULT",
"line_number": 217,
"usage_type": "name"
}
] |
69894822589
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
import datetime as dt
from airflow.utils.dates import days_ago
default_args = {
'owner': 'gregh',
'start_date': days_ago(0),
'email': ['[email protected]'],
'email_on_failure': True,
'email_on_retry': True,
'retries': 2,
'retry_delay': dt.timedelta(minutes=5)
}
dag = DAG(
dag_id='process_web_log',
schedule_interval=dt.timedelta(days=1),
default_args=default_args,
description='Airflow Web Log Daily Processor'
)
extract_data = BashOperator(
task_id='extract',
bash_command='cut -d "-" -f1 /home/project/airflow/dags/capstone/accesslogs.txt > /home/project/airflow/dags/capstone/extracted_data.txt',
dag=dag
)
transform_data = BashOperator(
task_id='transform',
bash_command='sed "/198.46.149.143/d" /home/project/airflow/dags/capstone/extracted_data.txt > /home/project/airflow/dags/capstone/transformed_data.txt',
dag=dag
)
load_data = BashOperator(
task_id='load',
bash_command='tar -cvf /home/project/airflow/dags/capstone/weblog.tar /home/project/airflow/dags/capstone/transformed_data.txt',
dag=dag
)
extract_data >> transform_data >> load_data
|
gregh13/Data-Engineering
|
Projects/Capstone Project/Task 5/Part Two - Apache Airflow ETL/process_web_log.py
|
process_web_log.py
|
py
| 1,221 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "airflow.utils.dates.days_ago",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "airflow.operators.bash_operator.BashOperator",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "airflow.operators.bash_operator.BashOperator",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "airflow.operators.bash_operator.BashOperator",
"line_number": 38,
"usage_type": "call"
}
] |
8927274924
|
from datetime import datetime as dt
from datetime import timedelta
import pickle
import time
import dask.dataframe as dd
from dask.distributed import as_completed, worker_client
import numpy as np
import pandas as pd
import requests
import s3fs
BUCKET = "insulator-citi-bikecaster"
INSULATOR_URLS = [
"https://api-dev.insulator.ai/v1/time_series",
"https://ybcbwoz3w6.execute-api.us-east-1.amazonaws.com/staging/v1/time_series"
]
s3 = s3fs.S3FileSystem()
def model_key(station_id):
return f"models/station_{station_id}.pkl"
def load_model(station_id):
with s3.open(f"{BUCKET}/{model_key(station_id)}", "rb") as f:
return pickle.loads(f.read())
def load_local_model(station_id):
with open(f"models/station_{station_id}.pkl", "rb") as f:
return pickle.load(f)
def ts_to_unixtime(series):
return series.astype(np.int64) // 10 ** 9
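# Quick sketch of ts_to_unixtime (input value is illustrative): datetime64[ns]
# values cast to int64 are nanoseconds since the epoch, so floor-dividing by
# 10 ** 9 yields unix seconds, e.g.
#   ts_to_unixtime(pd.Series(pd.to_datetime(["1970-01-01 00:01:00"])))  # -> [60]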
def post_outcome(df, station_id, usernames, api_keys):
two_hours_ago = dt.now() - timedelta(hours=2)
past_two_hours = df[df["last_reported"] >= two_hours_ago]
past_two_hours = past_two_hours.sort_values("last_reported")
series_timestamps = ts_to_unixtime(past_two_hours["last_reported"]).tolist()
series_values = past_two_hours["num_bikes_available"].astype("int").tolist()
post_event(station_id, series_timestamps, series_values, "outcome", usernames, api_keys)
def post_event(station_id, series_timestamps, series_values, event_type, usernames, api_keys):
payload = {
"service_name": "bikecaster",
"model_name": "lin_reg",
"model_version": "0.1.0",
"timestamp": time.time(),
"entities": {"station_id": station_id},
"series_timestamps": series_timestamps,
"series_values": series_values
}
assert event_type in ("prediction", "outcome")
for username, api_key, insulator_url in zip(usernames, api_keys, INSULATOR_URLS):
url = f"{insulator_url}/{event_type}"
try:
response = requests.post(url, auth=(username, api_key), json=payload)
if not response:
print(f"Error posting to insulator ingest API: {response.text}")
except Exception as e:
print(e)
def make_forecast(df, station_id, usernames, api_keys):
station_df = df[df["station_id"] == station_id]
post_outcome(station_df, station_id, usernames, api_keys)
station_df = (
station_df
.set_index("last_reported")
.sort_index()
.resample("5T", label="right", closed="right")
.last()
.fillna(method="ffill")
)
y = station_df["num_bikes_available"].values.copy()
X = y.reshape(-1, 1).copy()
try:
model = load_local_model(station_id)
    except Exception:
print(f"There's no model for station {station_id}")
return False
try:
series_values = np.squeeze(model.predict(X, start_idx=len(X) - 1))
    except Exception:
print(f"Error predicting for station {station_id}")
return False
series_values = np.clip(series_values.astype(int), 0, None).astype("int").tolist()
series_timestamps = pd.date_range(
station_df.index[-1], periods=len(series_values) + 1, freq="5T"
)
# Remove the first value because it's the last value in the original data.
series_timestamps = series_timestamps[1:]
series_timestamps = ts_to_unixtime(series_timestamps).astype("int").tolist()
post_event(station_id, series_timestamps, series_values, "prediction", usernames, api_keys)
return True
def pipeline(s3_path, usernames, api_keys):
df = dd.read_csv(s3_path).compute()
df["last_reported"] = pd.to_datetime(df["last_reported"])
MIN_DATE = "2016-01-01"
df = df[df.last_reported >= MIN_DATE]
with worker_client() as client:
df_future = client.scatter(df)
futures = []
for station_id in sorted(df["station_id"].unique().tolist()):
futures.append(client.submit(make_forecast, df_future, station_id, usernames, api_keys))
total = len(futures)
success = 0
for result in as_completed(futures):
if result.result():
success += 1
if success % 50 == 0:
print(f"{success} / {total} tasks successfully completed")
print(f"Done. Final tally: {success} / {total} tasks successfully completed")
return True
|
EthanRosenthal/citi-bikecaster-model
|
calcs.py
|
calcs.py
|
py
| 4,374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "s3fs.S3FileSystem",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pickle.loads",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "dask.dataframe.read_csv",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "dask.dataframe",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "dask.distributed.worker_client",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "dask.distributed.as_completed",
"line_number": 123,
"usage_type": "call"
}
] |
30827334271
|
import json
import os
class FileUtils:
@staticmethod
def readJsonFile(filePath):
with open(filePath, 'r', encoding='utf-8') as file:
jsonData = json.load(file)
return jsonData
@staticmethod
def writeJsonFile(filePath, jsonData):
with open(filePath, 'w', encoding='utf-8') as file:
file.write(json.dumps(jsonData, sort_keys=False, indent=4, separators=(',', ': ')))
@staticmethod
def readLinesFromFile(filePath) -> list:
with open(filePath, 'r', encoding='utf-8') as f:
return [line.replace('\n', '') for line in f.readlines()]
|
Danny0515/Portfolio-crawler
|
src/main/utils/FileUtils.py
|
FileUtils.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
}
] |
21419147973
|
import numpy as np
from os.path import join
from psbody.mesh import Mesh
from fitting.landmarks import load_embedding, landmark_error_3d, mesh_points_by_barycentric_coordinates, load_picked_points
from fitting.util import load_binary_pickle, write_simple_obj, safe_mkdir, get_unit_factor
import open3d as o3d
import argparse, os
from tqdm import tqdm
import logging
logger = logging.getLogger(__name__)
def get_config():
parser = argparse.ArgumentParser(description='modify mean and std and orientation')
parser.add_argument("--scans", type=str, default= "mesh", help='path of the scan') # for a mesh path, replace 'mesh' to 'lmk' get its corresponding lmk path
parser.add_argument("--lmks", type=str, default= "lmk", help='path of the output')
parser.add_argument("--save", type=str, default= "lx_result", help='path of the output')
args = parser.parse_args()
return args
def x_rotate(v):
    # negating y and z is a 180-degree rotation about the x axis
    return v*[1, -1, -1]
def transl(v, old_mean, new_mean):
return v-old_mean+new_mean
def transl_scale(v, old_mean, old_std, new_mean, new_std):
return (v-old_mean)/old_std*new_std+new_mean
def modify_face(face):
return face
def get_vertice_mean_std(v):
return np.mean(v, axis=0), np.std(v)
def get_mean_std(filename):
mesh = Mesh(filename=filename)
if hasattr(mesh, 'f'):
        mesh.f = modify_face(mesh.f) # TODO: not yet determined whether the face winding order needs to be flipped
mean = np.mean(mesh.v, axis=0)
std = np.std(mesh.v)
return mean, std, mesh
def flamefit_test():
eg = './data/scan.obj'
lmk = './data/scan_lmks.npy'
    eg_mean, eg_std, eg_mesh = get_mean_std(eg) # mean is computed per x-y-z axis, std over all vertices
eg_lmk = np.load(lmk)
print(f'my example scan mean: {eg_mean}, std: {eg_std}')
my_scan = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/test/mesh/3_pointcloud.obj"
my_lmk = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/test/lmk/3_pointcloud.npy"
mean, std, mesh = get_mean_std(my_scan)
lmk = np.load(my_lmk)
v = mesh.v
print(f'my origina scan mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x.obj'))
np.save(my_lmk.replace('.npy', '_x.npy'), lmk)
mean, std = get_vertice_mean_std(v)
print(f'my rotated scan mean: {mean}, std: {std}')
v_transl = transl(v, mean, eg_mean)
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl.obj'))
np.save(my_lmk.replace('.npy', '_x_transl.npy'), lmk_transl)
mean_transl, std_transl = get_vertice_mean_std(v_transl)
print(f'my transla scan mean: {mean_transl}, std: {std_transl}')
v = transl_scale(v, mean, std, eg_mean, eg_std)
lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale.obj'))
np.save(my_lmk.replace('.npy', '_x_transl_scale.npy'), lmk)
mean, std = get_vertice_mean_std(v)
print(f'my tra_sca scan mean: {mean}, std: {std}')
# scale to similar size based on lmk
eg_lmk = eg_lmk - eg_mean
    lmk = lmk - mean # landmark coordinates relative to the origin
    times = np.mean(np.mean(eg_lmk/lmk, axis=1)) # average ratio between the landmark sets
v = (v - mean)*times
lmk = lmk*times
mean, std = get_vertice_mean_std(v)
print(f'my fang_da scan mean: {mean}, std: {std}')
v = transl_scale(v, mean, std, eg_mean, eg_std)
lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale_fangda.obj'))
np.save(my_lmk.replace('.npy', '_x_transl_scale_fangda.npy'), lmk)
mean, std = get_vertice_mean_std(v)
print(f'my finally scan mean: {mean}, std: {std}')
# Only a rotation and a translation are needed; call this function.
def liuxu_flamefit():
eg = './data/scan.obj'
lmk = './data/scan_lmks.npy'
    eg_mean, eg_std, eg_mesh = get_mean_std(eg) # mean is computed per x-y-z axis, std over all vertices
eg_lmk = np.load(lmk)
print(f'my example scan mean: {eg_mean}, std: {eg_std}')
my_scan = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/new_cap/mesh/0_face.obj"
my_lmk = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/new_cap/lmk/0_face.npy"
mean, std, mesh = get_mean_std(my_scan)
lmk = np.load(my_lmk)[-51:]
v = mesh.v
print(f'my origina scan mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
# write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x.obj'))
# np.save(my_lmk.replace('.npy', '_x.npy'), lmk)
mean, std = get_vertice_mean_std(v)
# print(f'my rotated scan mean: {mean}, std: {std}')
    v_transl = transl(v, mean, eg_mean) # the obj produced at this step gives the best fit
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl.obj'))
np.save(my_lmk.replace('.npy', '_x_transl.npy'), lmk_transl)
mean_transl, std_transl = get_vertice_mean_std(v_transl)
print(f'my transla scan mean: {mean_transl}, std: {std_transl}')
# v = transl_scale(v, mean, std, eg_mean, eg_std)
# lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
# write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale.obj'))
# np.save(my_lmk.replace('.npy', '_x_transl_scale.npy'), lmk)
# mean, std = get_vertice_mean_std(v)
# print(f'my tra_sca scan mean: {mean}, std: {std}')
def get_lmk_meanstd(lmk):
mean = np.mean(lmk, axis=0)
std = np.std(lmk)
return mean, std
# Only a rotation and a translation are needed; call this function.
def liuxu_modify_basedon_lmk():
eg = 'data/scan.obj'
lmk = 'data/scan_lmks.npy'
eg_lmk = np.load(lmk)
    eg_mean, eg_std = get_lmk_meanstd(eg_lmk) # mean is computed per x-y-z axis, std over all landmarks
print(f'my example lmk mean: {eg_mean}, std: {eg_std}')
my_scan = "data/lizhenliang2/lizhenliang2_down10.ply"
my_lmk = "data/lizhenliang2/lizhenliang2_picked_points.pp"
lmk = get_lmk(my_lmk)[-51:]
mean, std = get_lmk_meanstd(lmk)
mesh = Mesh(filename=my_scan)
v = mesh.v
print(f'my origina lmk mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
mean, std = get_lmk_meanstd(lmk)
    v_transl = transl(v, mean, eg_mean) # the obj produced at this step gives the best fit
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.ply', '_x_transl_by_lmk.obj'))
np.save(my_lmk.replace('.pp', '_x_transl_by_lmk.npy'), lmk_transl)
mean_transl, std_transl = get_lmk_meanstd(lmk_transl)
print(f'my transla lmk mean: {mean_transl}, std: {std_transl}')
# v = transl_scale(v, mean, std, eg_mean, eg_std)
# lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
# write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale_by_lmk.obj'))
# np.save(my_lmk.replace('.npy', '_x_transl_scale_by_lmk.npy'), lmk)
# mean, std = get_lmk_meanstd(lmk)
# print(f'my tra_sca lmk mean: {mean}, std: {std}')
# print(f'the 13th lmk of example: {eg_lmk[13]}, my: {lmk[13]}')
def get_lmk(lmk_path):
if lmk_path.endswith('.npy'):
lmk = np.load(lmk_path)
elif lmk_path.endswith('.pp'):
lmk = load_picked_points(lmk_path)
return lmk
def stupid_test():
eg = './data/scan.obj'
eg_mean, eg_std, eg_mesh = get_mean_std(eg)
args = get_config()
save_root = join('data', args.save)
os.makedirs(save_root, exist_ok=True)
save_scan = join(save_root, args.scans)
os.makedirs(save_scan, exist_ok=True)
save_lmk = join(save_root, args.lmks)
os.makedirs(save_lmk, exist_ok=True)
scans = join('./data/test', args.scans)
for r, ds, fs in os.walk(scans):
for f in tqdm(fs):
if f.endswith("obj"):
scan_path = os.path.join(r,f)
print(scan_path)
output = join(save_scan, f)
mean, std, mesh = get_mean_std(scan_path)
                moved_v = (mesh.v - mean) # move our mesh to the origin and normalize
avg_v = np.mean(moved_v, axis=0)
                eg_v = (eg_mesh.v - eg_mean) # move the reference mesh to the origin and normalize
avg_eg_v = np.mean(eg_v, axis=0)
print(f'my origin scan mean: {mean}, origin example mean: {eg_mean}')
print(f'my scan mean: {np.mean(moved_v, axis=0)}, example mean: {np.mean(eg_v, axis=0)}')
avg_scale = np.mean(avg_eg_v/avg_v) * 8.5
print("scale times: ", avg_scale)
                scaled_v = moved_v * avg_scale # at this point the mesh should be about the same size as the example
                v = moved_v + eg_mean # not scaled up, only translated
print(f"my new mean: {np.mean(v, axis=0)}, eg_mean: {eg_mean}")
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, output)
                # modify the landmark coordinates accordingly
lmk_path = scan_path.replace(args.scans, args.lmks).replace('obj', 'npy')
ori_lmk = np.load(lmk_path)
ori_lmk *= [1, -1, -1]
lmk_output = join(save_lmk, f.replace('obj', 'npy'))
moved_lmk = (ori_lmk - mean)
scaled_lmk = moved_lmk * avg_scale
modified_lmk = moved_lmk + eg_mean
np.save(lmk_output, modified_lmk)
# res_lmk = o3d.geometry.PointCloud()
# res_lmk.points = o3d.utility.Vector3dVector(modified_lmk)
# res_mesh = o3d.io.read_triangle_mesh(output)
# o3d.visualization.draw_geometries([res_mesh, res_lmk, eg_mesh])
# Only a rotation and a translation are needed; call this function.
def modify(my_scan, my_lmk):
eg = 'data/scan.obj'
lmk = 'data/scan_lmks.npy'
eg_lmk = np.load(lmk)
    eg_mean, eg_std = get_lmk_meanstd(eg_lmk) # mean is computed per x-y-z axis, std over all landmarks
logger.info(f'my example lmk mean: {eg_mean}, std: {eg_std}')
lmk = get_lmk(my_lmk)[-51:]
mean, std = get_lmk_meanstd(lmk)
mesh = Mesh(filename=my_scan)
v = mesh.v
logger.info(f'my origina lmk mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
mean, std = get_lmk_meanstd(lmk)
    v_transl = transl(v, mean, eg_mean) # the obj produced at this step gives the best fit
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.ply', '_x_transl_by_lmk.obj'))
np.save(my_lmk.replace('.pp', '_x_transl_by_lmk.npy'), lmk_transl)
mean_transl, std_transl = get_lmk_meanstd(lmk_transl)
logger.info(f'my transla lmk mean: {mean_transl}, std: {std_transl}')
trans = -mean + eg_mean
logger.info(f"trans: {trans}")
return my_scan.replace('.ply', '_x_transl_by_lmk.obj'), my_lmk.replace('.pp', '_x_transl_by_lmk.npy'), trans
if __name__ == '__main__':
# flamefit_test()
# liuxu_flamefit()
liuxu_modify_basedon_lmk()
|
qdmy/flame-fitting
|
modify_pointcloud.py
|
modify_pointcloud.py
|
py
| 11,254 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "psbody.mesh.Mesh",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "psbody.mesh.Mesh",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "fitting.landmarks.load_picked_points",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "psbody.mesh.Mesh",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "fitting.util.write_simple_obj",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 256,
"usage_type": "call"
}
] |
31282202503
|
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import functools
import re
# import unicodedata
from string import punctuation as PUNCTUATIONS
import numpy as np
from doors.dates import get_timestamp
SPECIAL_PUNCTUATIONS = PUNCTUATIONS.replace("_", "")
def not_is_feat(col):
return not is_feat(col)
def is_feat(col):
return "feat:" in col
def clean_string(string):
return string.lower().rstrip().replace(" ", "_").replace("'", "")
def to_lowercase(strings):
strings = [string.lower() for string in strings]
return strings
def get_pronounceable_name():
consonants = ["b", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "r", "s", "t"]
vowels = ["a", "e", "i", "o", "u"]
final_consonants = ["b", "f", "k", "l", "m", "n", "r", "s", "t"]
return (
np.random.choice(consonants)
+ np.random.choice(vowels)
+ np.random.choice(consonants)
+ np.random.choice(vowels)
+ np.random.choice(final_consonants)
)
def get_unique_id():
"""Pronounceable hash to be pronounced more or less ecclesiastically.
More details: https://www.ewtn.com/expert/answers/ecclesiastical_latin.htm
"""
return get_pronounceable_name() + "_" + get_timestamp("%y%m%d_%H%M%S")
def add_as_strings(*args, **kwargs):
result = args[0].astype(str)
sep = kwargs.get("sep")
if sep:
seperator = np.repeat(sep, len(result))
else:
seperator = None
for arr in args[1:]:
if seperator is not None:
result = _add_strings(result, seperator)
result = _add_strings(result, arr.astype(str))
return result
def _add_strings(v, w):
return np.core.defchararray.add(v, w)
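# Small usage sketch for add_as_strings (values are illustrative):
#   add_as_strings(np.array([1, 2]), np.array(["a", "b"]), sep="-")
# casts the first array to str, inserts the separator before each later array,
# and returns array(['1-a', '2-b']) via numpy's element-wise string add.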
def camelcase_to_underscore(string):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", string)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
def remove_punctuation(string):
for punctuation in SPECIAL_PUNCTUATIONS:
string = string.replace(punctuation, "")
return string
# def utf_to_ascii(string):
# uni_string = unicode(string, "utf")
# ascii_string = unicodedata.normalize("NFKD", uni_string).encode("ascii", "ignore")
# return ascii_string
def is_ascii(string):
    try:
        string.encode("ascii")
        return True
    except UnicodeEncodeError:
        return False
def as_string(obj):
if hasattr(obj, "__name__"):
representation = obj.__name__
elif isinstance(obj, functools.partial):
representation = _get_partial_representation(obj)
elif hasattr(obj, "__dict__"):
representation = get_class_representation(obj)
elif hasattr(obj, "__name__"):
representation = obj.__name__
else:
representation = str(obj)
return representation
def _get_partial_representation(obj):
func_rep = as_string(obj.func)
input_rep = "func=" + func_rep
if _args_provided(obj):
arg_rep = _get_arg_representation(obj.args)
input_rep += ", " + arg_rep
if _kwargs_provided(obj):
kwarg_rep = get_dict_string_representation(obj.keywords)
input_rep += ", " + kwarg_rep
partial_rep = "partial({})".format(input_rep)
return partial_rep
def _kwargs_provided(obj):
return len(obj.keywords) > 0
def _args_provided(obj):
return len(obj.args) > 0
def _get_arg_representation(args):
return ", ".join([str(arg) for arg in args])
def get_class_representation(obj):
joint_str_rep = get_dict_string_representation(obj.__dict__)
cls_name = obj.__class__.__name__
return "{}({})".format(cls_name, joint_str_rep)
def get_dict_string_representation(dct):
str_rep = []
for key, value in dct.items():
if key[0] != "_":
value_representation = as_string(value)
str_rep.append("{}={}".format(key, value_representation))
joint_str_rep = ", ".join(str_rep)
return joint_str_rep
def convert_camelcase(camelcase):
"""
Credit to:
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-
camelcase-to-snake-case
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camelcase)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
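# Worked example for the two-pass regex above (input is illustrative):
#   convert_camelcase("MyHTTPServer")
#   pass 1 -> "MyHTTP_Server"   (any char followed by an Upper+lower run)
#   pass 2 -> "My_HTTP_Server"  (lowercase/digit followed by an uppercase)
#   lower() -> "my_http_server"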
def clean_white_space(array):
array = np.array([_clean_white_space(i) for i in array])
return array
def _clean_white_space(v):
if isinstance(v, str):
v = v.strip(" ")
return v
|
chechir/doors
|
doors/strings.py
|
strings.py
|
py
| 4,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "string.punctuation.replace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "string.lower",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "string.lower",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "doors.dates.get_timestamp",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.core.defchararray.add",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.core",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "string.replace",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "string.decode",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 163,
"usage_type": "call"
}
] |
10389322209
|
import glob
import os
from os import path as op
import cv2
import numpy as np
from torch.utils.data import DataLoader
from pathlib import Path
from PIL import Image, ImageFilter
from detection.dummy_cnn.dataset import BaseBillOnBackGroundSet
from tqdm import tqdm
from sewar.full_ref import sam as sim_measure
from itertools import combinations, product
import time
from matplotlib import pyplot as plt
from multiprocessing import Pool
import pandas as pd
repo = Path(os.getcwd())
im_dir_gen = os.path.join(repo, "processed_data", "genbills")
im_dir_real = os.path.join(repo, "processed_data", "realbills")
im_dir_unseen = os.path.join(repo, "processed_data", "realbills", "unseen")
def resize(list_of_images, size):
outp = []
for im in tqdm(list_of_images):
copy = im.copy()
copy.thumbnail(size=(size, size), resample=Image.ANTIALIAS)
if copy.width > copy.height:
copy = copy.rotate(90, fillcolor=(0,), expand=True)
outp.append(copy)
return outp
def combs_self(list_of_images):
return np.array(list(combinations(range(len(list_of_images)), r=2))).astype(int)
def combs_between(list_of_images1, list_of_images2):
return np.array(list(product(range(len(list_of_images1)), range(len(list_of_images2))))).astype(int)
def simil(pair): # subfunction to put in parallel loop
im_1, im_2 = pair
m = ""
if im_1.width != im_2.width or im_1.height != im_2.height:
m = f"crop happened\n im1 dims = {im_1.width},{im_1.height},\n im2 dims = {im_2.width},{im_2.height}"
min_w = min(im_1.width, im_2.width)
min_h = min(im_1.height, im_2.height)
im_1 = im_1.crop((1, 1, min_w-1, min_h-1))
im_2 = im_2.crop((1, 1, min_w-1, min_h-1))
m+= f"\n crop dims = 1to{min_w-1}, 1to{min_h-1}"
m+= f"\n final dims = {im_1.width},{im_1.height}"
try:
score = sim_measure(np.array(im_1), np.array(im_2))
except Exception as e:
score = 0.5
print(e)
print(m)
return score
def similarity(list_of_images1, list_of_images2, combs):
similarity_score = 0
list_of_images1 = [list_of_images1[idx] for idx in combs[:,0]]
list_of_images2 = [list_of_images2[idx] for idx in combs[:,1]]
with Pool(12) as pool:
for score in tqdm(pool.imap(simil, zip(list_of_images1, list_of_images2)), total=len(list_of_images1)):
similarity_score += score
pool.close()
similarity_score /= len(combs)
return similarity_score
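# Note on the helper above: similarity averages the pairwise SAM score over
# every index pair in combs, streaming the image pairs to 12 worker processes
# via Pool.imap. SAM behaves as a dissimilarity here (0 for identical images),
# so a lower average means the two image sets look more alike.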
def edgin(image): #task function to put in Pool loop
corners = cv2.goodFeaturesToTrack(np.array(image.convert("L")), int(1e+6), 1e-6, 1e-6)
return len(corners)
def edginess(list_of_images):
score = 0
with Pool(12) as pool:
for corners in tqdm(pool.imap(edgin, list_of_images), total=len(list_of_images)):
score += corners
score /= len(list_of_images)
return score
# This script is meant to discover which size for training corner_cnn is the best
generated_images = BaseBillOnBackGroundSet(image_dir=im_dir_gen)
loader = DataLoader(dataset=generated_images,
batch_size=1,
num_workers=12,
shuffle=True)
temp = []
for im, _ in tqdm(loader, total=200):
im = im[0].numpy()
where_0 = np.sum(im, axis=2) > 0
for row, element in enumerate(where_0):
if np.all(element == 0): break
for col, element in enumerate(where_0.T):
if np.all(element == 0): break
im = im[:row, :col, :]
    try:
        temp.append(Image.fromarray(im))
    except Exception:
        print("Error occurred")
if len(temp) == 200: break
generated_images = temp
real_images = glob.glob(op.join(im_dir_real, "*.jpg"), recursive=False)
real_images = [Image.open(file) for file in real_images if not "mask" in file]#[:8]
test_images = glob.glob(op.join(im_dir_unseen, "*.jpg"), recursive=False)
test_images = [Image.open(file) for file in test_images if not "mask" in file]#[:8]
sizes = np.geomspace(1000, 10, 100).astype(int)
scores = {'sim_gen': [],
'sim_real': [],
'sim_test': [],
'sim_gen_vs_real': [],
'sim_gen_vs_test': [],
'sim_test_vs_real': [],
"edg_gen": [],
"edg_real": [],
"edg_test": []}
print("#" * 100)
print()
for size in sizes:
images_of_size = {"gen": [], "real": [], "test": []}
print(f"Resizing {size}")
images_of_size['gen'] = resize(generated_images, size)
images_of_size['real'] = resize(real_images, size)
images_of_size['test'] = resize(test_images, size)
time.sleep(2)
print(f"\nCollect similarity inside every set {size}")
for k in images_of_size.keys():
sim = similarity(list_of_images1=images_of_size[k],
list_of_images2=images_of_size[k],
combs=combs_self(images_of_size[k]))
scores[f'sim_{k}'].append(sim)
time.sleep(2)
print(f"\nCollect similarity inbetween sets {size}")
for k_pair in [("gen", "real"), ("gen", "test"), ("test", "real")]:
sim = similarity(list_of_images1=images_of_size[k_pair[0]],
list_of_images2=images_of_size[k_pair[1]],
combs=combs_between(list_of_images1=images_of_size[k_pair[0]],
list_of_images2=images_of_size[k_pair[1]]))
scores[f'sim_{k_pair[0]}_vs_{k_pair[1]}'].append(sim)
time.sleep(2)
print(f"\nCollect edginess of every set {size}")
for k in images_of_size.keys():
edg = edginess(list_of_images=images_of_size[k])
scores[f'edg_{k}'].append(edg)
time.sleep(2)
# plotting current results
num_el = len(scores["sim_gen"])
f, ax = plt.subplots(nrows=3, ncols=1, figsize=(10, 15))
ax[0].set_title("Dissimilarity of images within each set")
ax[0].set_xlabel("Size of image")
ax[0].plot(sizes[:num_el][::-1], scores["sim_gen"][::-1], label="generated images", c="red")
ax[0].plot(sizes[:num_el][::-1], scores["sim_real"][::-1], label="real images", c="blue")
ax[0].plot(sizes[:num_el][::-1], scores["sim_test"][::-1], label="test images", c="blue", ls=":")
ax[1].set_title("Dissimilarity of images between sets")
ax[1].set_xlabel("Size of image")
ax[1].plot(sizes[:num_el][::-1], scores["sim_gen_vs_real"][::-1], label="generated vs real images", c="blue")
ax[1].plot(sizes[:num_el][::-1], scores["sim_gen_vs_test"][::-1], label="generated vs test images", c="blue", ls=":")
ax[1].plot(sizes[:num_el][::-1], scores["sim_test_vs_real"][::-1], label="real vs test images", c="green")
ax[2].set_title("Number of corners detected of images within each set")
ax[2].set_xlabel("Size of image")
ax[2].plot(sizes[:num_el][::-1], scores["edg_gen"][::-1], label="generated images", c="red")
ax[2].plot(sizes[:num_el][::-1], scores["edg_real"][::-1], label="real images", c="blue")
ax[2].plot(sizes[:num_el][::-1], scores["edg_test"][::-1], label="test images", c="blue", ls=":")
ax[2].set_yscale('log')
for a in ax:
a.legend()
a.grid(axis="x", which="both")
a.invert_xaxis()
a.set_xscale('log')
plt.tight_layout()
plt.savefig("/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/0_stats.png", dpi=150)
plt.close("all")
# save examples of images
images_of_size['gen'][0].save(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/generated_{size}.png")
images_of_size['real'][0].save(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/real_{size}.png")
images_of_size['test'][0].save(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/test_{size}.png")
#save scores
frame = pd.DataFrame(scores)
frame.set_index(sizes[:num_el], inplace=True)
frame.to_csv(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/0_scores.csv", sep=";")
print("#" * 100)
|
KaraLandes/BachelorsProject
|
Repo/compare_data_similarity.py
|
compare_data_similarity.py
|
py
| 8,019 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sewar.full_ref.sam",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.goodFeaturesToTrack",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "detection.dummy_cnn.dataset.BaseBillOnBackGroundSet",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "numpy.geomspace",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 201,
"usage_type": "call"
}
] |
20444657924
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import time
import csv
class Scraper:
def __init__(self, url):
self.driver = webdriver.Chrome("./chromedriver", options=self.set_chrome_options())
self.url = url
self.open_url()
self.content = self.get_content()
def set_chrome_options(self):
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
return chrome_options
def open_url(self):
self.driver.get(self.url)
def get_content(self):
content = self.driver.page_source
soup = BeautifulSoup(content, "html.parser")
return soup
# retrieves all elements with a chosen html tag
def get_all_tags(self, tag="h1"):
all_tags = []
for element in self.content.select(tag):
all_tags.append(element.text.strip())
return all_tags
def get_items(self, product_container='div.thumbnail'):
top_items = []
products = self.content.select(product_container)
for elem in products:
title = elem.select('h4 > a.title')[0].text
review_label = elem.select('div.ratings')[0].text
info = {
"title": title.strip(),
"review": review_label.strip()
}
top_items.append(info)
print(top_items)
# return(top_items)
def get_all_products(self, content_container='div.thumbnail'):
all_products = []
products = self.content.select(content_container)
for product in products:
name = product.select('h4 > a')[0].text.strip()
description = product.select('p.description')[0].text.strip()
price = product.select('h4.price')[0].text.strip()
reviews = product.select('div.ratings')[0].text.strip()
image = product.select('img')[0].get('src')
all_products.append({
"name": name,
"description": description,
"price": price,
"reviews": reviews,
"image": image
})
# print(all_products)
return all_products
def quit(self):
self.driver.quit()
def save_product_csv(self, all_products):
keys = all_products[0].keys()
with open('products.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_products)
if __name__ == "__main__":
urls = [
"https://webscraper.io/test-sites/e-commerce/allinone",
"https://webscraper.io/test-sites/e-commerce/allinone/computers",
"https://webscraper.io/test-sites/e-commerce/allinone/computers/laptops",
"https://webscraper.io/test-sites/e-commerce/allinone/computers/tablets",
"https://webscraper.io/test-sites/e-commerce/allinone/phones",
"https://webscraper.io/test-sites/e-commerce/allinone/phones/touch"
]
start_time = time.time()
for url in urls:
scraper = Scraper(url)
print("products:", scraper.get_all_products())
scraper.quit()
total_time = time.time() - start_time
print("time:", total_time)
|
RasbeeTech/Web-Scraper
|
scraper.py
|
scraper.py
|
py
| 3,381 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 104,
"usage_type": "call"
}
] |
18757756190
|
import argparse
import cv2
# argparse is used to capture arguments passed when the .py is called from the command line
ap = argparse.ArgumentParser()
# Here we define the label of the expected argument
ap.add_argument("-i", "--image", required=True,
help= "Path to the image")
# We create a dictionary that will receive the argument values
# The dictionary keys are the labels created in the argument definition
args = vars(ap.parse_args())
# The vars() function returns the values corresponding to the object's __dict__ attribute
# Here we read the image, accessed through the on-disk path passed as an argument.
# We access the value in args using, as the dictionary key, the same label used in the argument definition
image = cv2.imread(args["image"])
print("width: {} pixels".format(image.shape[1]))
print("height: {} pixels".format(image.shape[0]))
print("channels: {}".format(image.shape[2]))
print("Matrix shape: {}".format(image.shape))
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.imwrite("newimage.jpg", image)
|
CarlosAlfredoOliveiraDeLima/Practical-Python-and-OpenCV-Book
|
01 - load_display_save.py
|
01 - load_display_save.py
|
py
| 1,041 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 25,
"usage_type": "call"
}
] |
14868890436
|
from django.views.generic.base import TemplateView
from albums.forms import FileForm
from albums.models import Album, File
from core.decorators import view_decorator
from core.views import ResourceView
class AlbumPage(TemplateView):
template_name = "albums/main.html"
def expose(view):
view.expose = True
return view
@view_decorator(expose)
class AlbumView(ResourceView):
model = Album
@view_decorator(expose)
class FileView(ResourceView):
create_form = FileForm
model = File
|
qrees/backbone-gallery
|
albums/views.py
|
views.py
|
py
| 508 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "core.views.ResourceView",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "albums.models.Album",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "core.decorators.view_decorator",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "core.views.ResourceView",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "albums.forms.FileForm",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "albums.models.File",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "core.decorators.view_decorator",
"line_number": 22,
"usage_type": "call"
}
] |
14987411881
|
from PIL import Image
import os
from tkinter import filedialog
import tkinter as tk
def convert_pdf():
index = 0
path_picture = filedialog.askdirectory()
dire = 'Converted'
path_pdf = os.path.join(path_picture , dire)
os.mkdir(path_pdf)
my_list = os.listdir(path_picture)
for i in my_list:
image = Image.open(r'' + path_picture+'//' + i)
im = image.convert('RGB')
        im.save(r''+ path_pdf+'//' + i[:-4] +'.pdf', quality=15, optimize=True)
index = index + 1
root = tk.Tk()
convert_pdf()
tk.mainloop()
|
Elkayamacc/Image2PDF
|
PDFConverterV2.py
|
PDFConverterV2.py
|
py
| 561 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.mainloop",
"line_number": 24,
"usage_type": "call"
}
] |
10819469391
|
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i","--image",required = True, help="Path of Image File")
args = vars(ap.parse_args())
#image = cv2.imread("image.png")
print("Path: ", args["image"])
image = cv2.imread(args["image"])
# find all the 'black' shapes in the image
upper = np.array([15,15,15])
lower = np.array([0,0,0])
shapeMask = cv2.inRange(image,lower,upper)
# find the contours in the mask
cnts = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
print("Found {} black shapes".format(len(cnts)))
cv2.imshow("Mask", shapeMask)
# loop over the contours
for c in cnts:
# draw the contour and show it
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
|
Pallavi04/ComputerVision
|
FindShapes/shape.py
|
shape.py
|
py
| 837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "imutils.grab_contours",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 30,
"usage_type": "call"
}
] |
18798291843
|
import matplotlib.pyplot as plt
import random
import numpy as np
from IPython.display import display, clear_output
import time
def head_home(x, y):
"""
Head home down and to the left.
Parameters
----------
x : float
Horizontal coordinate.
y : float
Vertical coordinate.
Returns
-------
x : float
Updated horizontal coordinate.
y : float
Updated vertical coordinate.
"""
pick = np.zeros(x + y)
pick[0:x] = 1
if (np.random.choice(pick) == 1):
x -= 1
else:
y -= 1
if (x < 0):
x = 0
if (y < 0):
y = 0
return x, y
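# Worked example for the weighted step above (numbers are illustrative): with
# x=3, y=1 the pick array holds three 1s and one 0, so the ant steps left
# (x -= 1) with probability 3/4 and down (y -= 1) with probability 1/4,
# shrinking the larger coordinate first on the way back home to (0, 0).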
def search_for_food(x, y, smell):
"""
Search for food by following the smell.
Parameters
----------
x : float
Horizontal coordinate.
y : float
Vertical coordinate.
smell : numpy.ndarray
2D array of smells
Returns
-------
x : float
Updated horizontal coordinate.
y : float
Updated vertical coordinate.
"""
directions = ['up', 'left', 'down', 'right']
x_dim = smell.shape[0]
y_dim = smell.shape[1]
# First check to see if there is food up and to the right.
g = [] # follow gradient
m = []
if (x + 1 < x_dim):
if (smell[x + 1, y] > 0):
m.append(smell[x + 1, y])
g.append('right')
if (y + 1 < y_dim):
if (smell[x, y + 1] > 0):
m.append(smell[x, y + 1])
g.append('up')
if (g != []):
grad = g[m.index(max(m))]
# print("Following smell", grad)
else:
# else just pick a random direction.
grad = random.choice(directions)
# print("Choosing ",grad)
# move the ant
if (grad == 'up'):
y = y + 1
elif (grad == 'right'):
x = x + 1
elif (grad == 'down'):
y = y - 1
elif (grad == 'left'):
x = x - 1
else:
print(grad)
print("ERROR!!!!!!!!!!!!")
    # make sure we don't go off the grid.
if (x < 0):
x = 0
if (y < 0):
y = 0
if (x > x_dim - 1):
x = x_dim - 1
if (y > y_dim - 1):
y = y_dim - 1
return x, y
def run(num_ants=100, x_dim=70, y_dim=30):
"""
Run the simulation
Parameters
----------
num_ants : int
        Initial number of ants to simulate. Default = 100
x_dim : int
Horizontal dimension of the board. Default = 70
y_dim : int
Vertical dimension of the board. Default = 30
"""
smell = np.zeros((x_dim, y_dim))
food = np.zeros((x_dim, y_dim))
# place food
food[45:50, 25:30] = 10
food[45:50, 25:30] = 10
food[65:70, 0:5] = 10
x_loc = np.random.randint(0, x_dim, size=(num_ants, 1))
y_loc = np.random.randint(0, y_dim, size=(num_ants, 1))
ant_loc = np.concatenate((x_loc, y_loc), axis=1)
has_food = np.zeros((num_ants, 1))
fig, ax = plt.subplots(figsize=(10, 5))
# Main simulation loop
for i in range(0, 100):
# Loop over ants
for a in range(0, num_ants):
x = ant_loc[a, 0]
y = ant_loc[a, 1]
if (x == 0 and y == 0):
has_food[a] = 0
if has_food[a] > 0:
x, y = head_home(x, y)
smell[x, y] = smell[x, y] + 100
else:
x, y = search_for_food(x, y, smell)
if food[x, y] > 0:
food[x, y] -= 1
has_food[a] = 1
ant_loc[a, 0] = x
ant_loc[a, 1] = y
smell = smell - 1
smell[smell < 0] = 0
# plot world
plt.imshow(food.T, origin='lower', aspect='equal', cmap="magma")
for a in range(0, num_ants):
color = 'r'
if (has_food[a] > 0):
color = 'g'
plt.scatter(ant_loc[a, 0], ant_loc[a, 1], color=color)
        # Animation part (doesn't change)
clear_output(wait=True) # Clear output for dynamic display
display(fig) # Reset display
fig.clear() # Prevent overlapping and layered plots
time.sleep(0.0001) # Sleep for a fraction of a second to allow animation to catch up
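# Illustrative usage (not part of the original file): this module is meant to be driven
# from a Jupyter notebook, since the animation relies on IPython.display, e.g.
#   run(num_ants=50, x_dim=70, y_dim=30)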
|
msu-cmse-courses/cmse202-F22-data
|
code_samples/ant_function.py
|
ant_function.py
|
py
| 4,304 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "IPython.display.clear_output",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 192,
"usage_type": "call"
}
] |
6118401140
|
''' Lesson 2. HTML parsing. BeautifulSoup, MongoDB
Collect information about vacancies for a job title entered by the user (via input) from the Superjob (optional) and HH (required) sites. The application must analyse several pages of each site (the page count is also entered via input).
The resulting list must contain at least:
The vacancy title.
The offered salary (minimum and maximum, separately).
A link to the vacancy itself.
The site the vacancy was collected from.
Further vacancy parameters may be added if desired (for example, employer and location). The structure must be identical for vacancies from both sites. The combined result can be printed as a DataFrame via pandas.
'''
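# Illustrative shape of a single scraped record produced below (values are made up;
# the keys match the dictionaries built in HHscraper/SJscraper):
# {'имя вакансии': 'Python developer', 'город': 'Москва',
#  'ссылка на вакансию': 'https://hh.ru/vacancy/12345',
#  'источник': 'https://hh.ru', 'мин зарплата': 100000.0,
#  'макс зарплата': 150000.0, 'валюта': 'руб.'}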
from bs4 import BeautifulSoup as bs
import requests
import json
from time import sleep  # sleep() is used in the error handlers below but was never imported
class HHscraper:
def __init__(self, start_url, headers, params):
self.start_url = start_url
self.start_headers = headers
self.start_params = params
self.info_vacance = []
def get_html_string(self, url, headers='', params=''):
try:
response = requests.get(url, headers=headers, params=params)
if response.ok:
return response.text
except Exception as e:
sleep(1)
print(e)
return None
@staticmethod
def get_dom(html_string):
return bs(html_string, "html.parser")
def run(self):
next_butten_hh = ''
        while next_butten_hh is not None:
if next_butten_hh == '':
html_string = self.get_html_string(self.start_url + '/search/vacancy', self.start_headers, self.start_params)
else:
html_string = self.get_html_string(next_butten_hh)
soup = HHscraper.get_dom(html_string)
vacance_list = soup.findAll('div', attrs={'class': 'vacancy-serp-item'})
self.get_info_from_element(vacance_list)
try:
next_butten_hh = self.start_url + soup.find('a', attrs={'data-qa': 'pager-next'}).attrs["href"]
except Exception as e:
next_butten_hh = None
def get_info_from_element(self, vacance_list):
for vacance in vacance_list:
vacance_data = {}
vacance_name = vacance.find('a', {'class': 'bloko-link'}).getText()
vacance_city = vacance.find('div', {'data-qa': 'vacancy-serp__vacancy-address'}).getText()
vacance_link = vacance.find('a', {'class': 'bloko-link'}).attrs["href"]
vacance_data['имя вакансии'] = vacance_name
vacance_data['город'] = vacance_city
vacance_data['ссылка на вакансию'] = vacance_link
vacance_data['источник'] = self.start_url
self.get_salary(vacance_data, vacance)
self.info_vacance.append(vacance_data)
def get_salary(self, vacance_data, vacance):
try:
vacance_salary = vacance.find('span', {'data-qa': 'vacancy-serp__vacancy-compensation'}).getText()
vacance_salary = vacance_salary.replace('\u202f', '').split()
if '–' in vacance_salary:
vacance_data['мин зарплата'] = float(vacance_salary[0])
vacance_data['макс зарплата'] = float(vacance_salary[2])
vacance_data['валюта'] = vacance_salary[-1]
elif 'от' in vacance_salary:
vacance_data['мин зарплата'] = float(vacance_salary[1])
vacance_data['валюта'] = vacance_salary[-1]
elif 'до' in vacance_salary:
vacance_data['макс зарплата'] = float(vacance_salary[1])
vacance_data['валюта'] = vacance_salary[-1]
except Exception as e:
vacance_data['зарплата'] = None
def save_info_vacance(self):
with open("vacancy_hh.json", 'w', encoding="utf-8") as file:
json.dump(self.info_vacance, file, indent=2, ensure_ascii=False)
class SJscraper:
def __init__(self, start_url, headers, params):
self.start_url = start_url
self.start_headers = headers
self.start_params = params
self.info_sj_vacance = []
def get_html_string(self, url, headers='', params=''):
try:
response = requests.get(url, headers=headers, params=params)
if response.ok:
return response.text
except Exception as e:
sleep(1)
print(e)
return None
@staticmethod
def get_dom(html_string):
return bs(html_string, "html.parser")
def run(self):
next_butten_sj = ''
        while next_butten_sj is not None:
if next_butten_sj == '':
html_string = self.get_html_string(self.start_url + "vacancy/search/", self.start_headers,
self.start_params)
else:
html_string = self.get_html_string(next_butten_sj)
soup = SJscraper.get_dom(html_string)
vacance_list = soup.findAll('div', attrs={'class': 'Fo44F QiY08 LvoDO'})
self.get_info_from_element(vacance_list)
try:
next_butten_sj = main_link_sj + soup.find('a', attrs={'class': 'f-test-button-dalshe'}).attrs["href"]
except Exception as e:
next_butten_sj = None
def get_info_from_element(self, vacance_list):
for vacancy in vacance_list:
vacancy_sj_data = {}
vacancy_sj_name = vacancy.find('a', {'class': 'icMQ_'}).getText()
# vacance_sj_city = vacancy.find('span', {'class': 'f-test-text-company-item-location _2LcRC _1_rZy dXrZh Ml4Nx'}).getText()
vacancy_sj_link = main_link_sj + vacancy.find('a', {'class': 'icMQ_'}).attrs["href"]
vacancy_sj_data['имя вакансии'] = vacancy_sj_name
# vacance_sj_city['город'] = vacance_sj_city
vacancy_sj_data['ссылка на вакансию'] = vacancy_sj_link
vacancy_sj_data['источник'] = self.start_url
self.get_salary(vacancy_sj_data, vacancy)
self.info_sj_vacance.append(vacancy_sj_data)
def get_salary(self, vacancy_sj_data, vacancy):
try:
vacancy_sj_salary = vacancy.find("span",
{'class': "_1OuF_ _1qw9T f-test-text-company-item-salary"}).getText()
if '—' in vacancy_sj_salary:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[0].isdigit() and sal[1].isdigit():
mim_sal = sal[0] + sal[1]
vacancy_sj_data['мин зарплата'] = float(mim_sal)
else:
vacancy_sj_data['мин зарплата'] = float(sal[0])
if sal[-3].isdigit() and sal[-2].isdigit():
max_sal = sal[-3] + sal[-2]
vacancy_sj_data['макс зарплата'] = float(max_sal)
else:
vacancy_sj_data['макс зарплата'] = float(sal[-3])
vacancy_sj_data['валюта'] = sal[-1]
elif 'По' in vacancy_sj_salary:
vacancy_sj_data['зарплата'] = "По договоренности"
vacancy_sj_data['валюта'] = None
elif 'от' in vacancy_sj_salary:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[1].isdigit() and sal[2].isdigit():
mim_sal = sal[1] + sal[2]
vacancy_sj_data['мин зарплата'] = float(mim_sal)
else:
vacancy_sj_data['мин зарплата'] = float(sal[1])
vacancy_sj_data['валюта'] = sal[-1]
elif 'до' in vacancy_sj_salary:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[1].isdigit() and sal[2].isdigit():
max_sal = sal[1] + sal[2]
vacancy_sj_data['макс зарплата'] = float(max_sal)
else:
vacancy_sj_data['макс зарплата'] = float(sal[1])
vacancy_sj_data['валюта'] = sal[-1]
else:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[0].isdigit() and sal[1].isdigit():
user_sal = sal[0] + sal[1]
vacancy_sj_data['макс зарплата'] = float(user_sal)
except:
vacancy_sj_data['зарплата'] = None
def save_info_vacance(self):
with open("vacancy_sj.json", 'w', encoding="utf-8") as file:
json.dump(self.info_sj_vacance, file, indent=2, ensure_ascii=False)
if __name__ == '__main__':
user_find = input('Введите вакансию: ')
#user_find = 'python'
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"}
main_link_hh = "https://hh.ru"
params_main_hh = {"area": "1",
"fromSearchLine": "true",
"st": "searchVacancy",
"text": user_find,
"page": "0"}
scraper_hh = HHscraper(main_link_hh, headers, params_main_hh)
scraper_hh.run()
scraper_hh.save_info_vacance()
main_link_sj = "https://www.superjob.ru/"
params_sj = {"keywords": user_find,
"geo[t][0]": "4"}
scraper_sj = SJscraper(main_link_sj, headers, params_sj)
scraper_sj.run()
scraper_sj.save_info_vacance()
|
XYI7I/GeekBrains
|
AI/Method_collecting_Internet_data/Lesson2/lesson2.py
|
lesson2.py
|
py
| 10,254 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 193,
"usage_type": "call"
}
] |
27022143594
|
from pymongo import MongoClient
import pprint
from urllib.request import urlopen
from bs4 import BeautifulSoup
class Data_extraction_creation:
def __init__(self):
self.source=""
self.search=""
self.search_length=0
def getting_source(self):
#client=MongoClient("mongodb://127.0.0.1:27017")
#database=client['testing']
self.file_name=input("Enter the name of the text file to read the source code :\n")
self.file_name = self.file_name + ".txt"
self.file_open=open(self.file_name, 'r')
self.file2=self.file_open.read()
        # Parse the saved page source and work on its text representation, since the
        # scanning below relies on plain string slicing over the whole document.
        self.file = str(BeautifulSoup(self.file2, "html.parser"))
        print(self.file + "\n\n")
self.search="small text-uber-white"
search_length=len(self.search)
c=0
for i in range(0, len((self.file))-search_length): # for total counting part
substr = self.file[i:i+search_length]
if self.search == substr:
c = c + 1
if c == 3: # got the total time of the day
self.time_total = self.file[i+search_length+2: i+search_length+12]
if c==4: # got the total distance of the day
self.distance_total = self.file[i+search_length+2:i+search_length+7] + " km"
if c==5: # got the total cash collection
self.cash_collection_total = self.file[i+search_length+2:i+search_length+10]
if c==6: # got the total earnings
self.earnings_total = self.file[i+search_length+2: i+search_length+10]
break
#print(self.time_total + self.distance_total + self.cash_collection_total + self.earnings_total)
self.search='<p class="portable-soft-huge--right submenu__item__link layout cursor--pointer"><span class="layout__item portable-one-half one-half">' # first day
search_length=len(self.search)
c=0
day=""
#collection=database[day]
day_last_left=0
for i in range(0, len((self.file))-search_length): # counting individual trip of that day.
substr = self.file[i:i+search_length]
if self.search == substr:
trip_number=-1
pos=i
pos_span_ending=0
ending_span=""
for oo in range(1, 1000):
ss=self.file[pos + oo: pos+oo+7]
if ss=="</span>":
pos_span_ending=pos+oo
c = c + 1 # day count
day = self.file[i+search_length+1:pos_span_ending+1]
s_trip_start='<span class="trip-list__date layout__item one-quarter">'
s_trip_time='<span class="trip-list__duration layout__item one-quarter">'
s_trip_distance='<span class="trip-list__distance layout__item one-quarter"'
s_trip_earning='<span class="soft-tiny--left"'
span_endings='</span>'
s_trip_start_l=len(s_trip_start)
s_trip_time_l=len(s_trip_time)
s_trip_distance_l=len(s_trip_distance)
s_trip_earning_l=len(s_trip_earning)
e_trip_start=0
e_trip_time=0
e_trip_distance=0
e_trip_earning=0
check=2
trip_number = trip_number + 1
# trip time
for r in range(e_trip_time, len(self.file)- s_trip_time_l):
t = self.file[ e_trip_time + r : e_trip_time + r + s_trip_time_l ]
check=2
if t == s_trip_time:
start = r + s_trip_time_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_time=r+m+7
self.trip_time=self.file[start : e_trip_time + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
# trip start time
for r in range(e_trip_start, len(self.file)- s_trip_start_l):
t = self.file[ e_trip_start + r : e_trip_start + r + s_trip_start_l ]
check=2
if t == s_trip_start:
start = r + s_trip_start_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_start=r+m+7
self.trip_start=self.file[start : e_trip_start + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
#trip distance
for r in range(e_trip_distance, len(self.file)- s_trip_distance_l):
t = self.file[ e_trip_distance + r : e_trip_distance + r + s_trip_distance_l ]
check=2
if t== s_trip_distance:
start = r + s_trip_distance_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_distance=r+m+7
self.trip_distance=self.file[start : e_trip_distance + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
# trip earnings
for r in range(e_trip_earning, len(self.file)- s_trip_earning_l):
t = self.file[ e_trip_earning + r : e_trip_earning + r + s_trip_earning_l ]
check=2
if t==s_trip_earning:
start = r + s_trip_earning_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_earning=r+m+7
self.trip_earning=self.file[start : e_trip_earning + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
                                # completed trip calculation for one trip.
print("Day "+day)
print("Trip number "+str(trip_number))
print("Trip starting "+self.trip_start)
print("Trip time "+self.trip_time)
print("Trip distance "+self.trip_distance)
print("Trip earnings "+self.trip_earning)
extractor = Data_extraction_creation()  # avoid shadowing the built-in name `object`
extractor.getting_source()
|
Harkishen-Singh/Uber-App-Record-Analysis
|
creating databasse copy.py
|
creating databasse copy.py
|
py
| 7,444 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
}
] |
31356164054
|
import os
import argparse
import re
import textwrap
default_mpi_function_list = [
"int MPI_Init(int *argc, char ***argv)",
"int MPI_Finalize(void)",
"int MPI_Comm_rank(MPI_Comm comm, int *rank)",
"int MPI_Comm_size(MPI_Comm comm, int *size)",
"int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)",
"int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status)"
]
def extract_between(text, sub1, sub2, nth=1):
"""
extract a substring from text between two given substrings
sub1 (nth occurrence) and sub2 (nth occurrence)
arguments are case sensitive
"""
# prevent sub2 from being ignored if it's not there
if sub2 not in text.split(sub1, nth)[-1]:
return None
return text.split(sub1, nth)[-1].split(sub2, nth)[0]
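# Illustrative example (not in the original script):
#   extract_between("int MPI_Init(int *argc, char ***argv)", "(", ")")
#   -> "int *argc, char ***argv"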
def get_args_list(args_name, args_type, args_post):
d = {}
d["pargs"] = ""
d["args"] = ""
for idy,function_name in enumerate(args_name):
d["pargs"] += args_type[idy]
d["pargs"] += " "
d["pargs"] += args_name[idy]
d["pargs"] += args_post[idy]
d["pargs"] += ", "
d["args"] += args_name[idy]
d["args"] += ", "
if(len((d["pargs"])) > 0):
if(d["pargs"][-2] == ','):
d["pargs"] = d["pargs"][:-2]
if(d["args"][-2] == ','):
d["args"] = d["args"][:-2]
return d
def get_ret_list(rtype):
d = {}
dec_ret_val = ""
get_ret_val = ""
ret_ret_val = "return"
if(rtype != "void"):
dec_ret_val += rtype + " val = ("+rtype+") 0;"
get_ret_val += "val = "
ret_ret_val += " val"
ret_ret_val += ";"
d["dec"] = dec_ret_val
d["get"] = get_ret_val
d["ret"] = ret_ret_val
return d
def parse_mpi_functions(mpi_functions_list):
d={}
d["name"] = []
d["type"] = []
d["args"] = {}
d["args"]["type"] = []
d["args"]["name"] = []
d["args"]["post"] = []
for function in mpi_functions_list:
d["name"] += [function.split()[1].split('(')[0]]
d["type"] += [function.split()[0]]
args_list = extract_between(function, '(', ')')
name_list = []
type_list = []
post_list = []
tmp = ""
for mpi_args in args_list.split(','):
mpi_arg = mpi_args.split()
if(len(mpi_arg) > 1):
tmp_idx = mpi_arg[-1].strip('*').find("[")
if(tmp_idx < 0):
tmp_idx = len(mpi_arg[-1].strip('*'))
name_list += [mpi_arg[-1].strip('*')[0:tmp_idx]]
tmp = mpi_arg[0]
if(tmp == "const"):
tmp += " " + mpi_arg[1]
for idx in range(0,mpi_args.count('*')):
tmp += ' *'
type_list += [tmp]
if("[" in mpi_arg[-1]):
post_list += ["[]"]
else:
post_list += [""]
d["args"]["name"] += [name_list]
d["args"]["type"] += [type_list]
d["args"]["post"] += [post_list]
return d
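# Illustrative example of the parsed structure (derived from the code above, not printed
# by the original script):
#   parse_mpi_functions(["int MPI_Comm_rank(MPI_Comm comm, int *rank)"])
#   -> {"name": ["MPI_Comm_rank"], "type": ["int"],
#       "args": {"type": [["MPI_Comm", "int *"]], "name": [["comm", "rank"]], "post": [["", ""]]}}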
def get_mpi_proto_list(d):
l = []
for idx,function in enumerate(d["name"]):
proto = d["type"][idx]+" "+d["name"][idx]+"("
for idy,function_name in enumerate(d["args"]["name"][idx]):
proto += d["args"]["type"][idx][idy]
proto += " "
proto += d["args"]["name"][idx][idy]
proto += d["args"]["post"][idx][idy]
proto += ", "
if(proto[-2] == ','):
proto = proto[:-2]
proto += ")"
l += [proto]
return l
def print_selfie_h_header():
s = ""
s += '''#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <cstring>
#include <execinfo.h>
#include <dlfcn.h>
#include <cstdarg>
#include <fenv.h>
#pragma STDC FENV_ACCESS ON
typedef void (*function_type)(...);
'''
return s
def print_selfie_h_footer():
s = ""
s += '''
}
'''
return s
def print_selfie_h_n_mpi(d, plugin_name):
s = '''
/// \\brief Total number of {1} functions
#define N_{1}_FUNCTIONS {0}
'''.format(str(len(d["name"])), plugin_name.upper())
return s
def print_selfie_h_get_name(d,plugin_name):
s = ""
s +='''/// \\brief Return a string containing name of functions
/// \\param[in] i Index
/// \\return Return a string containing name of functions
///
char *selfie_get_{0}_function_name(int i)
{{
char const *{0}_functions_name[] = {{
'''.format(plugin_name)
for name in d["name"]:
s += ''' "{0}",\n'''.format(name)
for name in d["name"]:
s += ''' "P{0}",\n'''.format(name)
s += ''' NULL
}};
return strdup({0}_functions_name[i]);
}};
'''.format(plugin_name)
return s
def print_selfie_h_builtin_function(idx, name, symbol, rtype, plugin_name):
d_ret = get_ret_list(rtype)
s = '''
#ifdef __SELFIE_MPI_BUILTIN__
/// \\brief {1}
///
/// \\param ...
/// \\return {3}
///
/// \\details
///
{3} {1}(...)
{{
double f_start = 0.0;
function_type selfie_function = NULL;
int ap_except = 0;
selfie_function = selfie_{4}_pointer_functions[{0}];
if(selfie_function == NULL)
{{
selfie_function = (function_type) dlsym(RTLD_NEXT,"{2}");
}}
selfie_{4}_global_data[{0}].function_count++;
f_start = selfie_mysecond();
ap_except = fedisableexcept(FE_INVALID);
void* ret = __builtin_apply(selfie_function,
__builtin_apply_args(), 1024);
feclearexcept(FE_INVALID);
feenableexcept(ap_except);
selfie_{4}_global_data[{0}].function_time += selfie_mysecond() - f_start;
__builtin_return(ret);
}};
#endif
'''.format(idx, name, symbol, rtype, plugin_name)
return s
def print_selfie_h_functions(d,plugin_name):
s = ""
for idx,name in enumerate(d["name"]):
s += print_selfie_h_builtin_function(idx, name, name,
d["type"][idx], plugin_name)
s += print_selfie_h_builtin_function(idx,
"P"+name, name, d["type"][idx],
plugin_name)
return s
def print_selfie_h_global_array(d,plugin_name):
s = '''
/// \\brief Array of pointers of functions
function_type selfie_{1}_orig_pointer_functions[{0}] = {{NULL}};
/// \\brief Array of pointers of functions
function_type *selfie_{1}_pointer_functions = selfie_{1}_orig_pointer_functions;
'''.format(len(d["name"]),plugin_name)
return s
def print_selfie_h(d,pname):
s = ""
s += print_selfie_h_header()
s += print_selfie_h_n_mpi(d, pname)
s += print_selfie_h_get_name(d, pname)
s += print_selfie_h_global_array(d, pname)
s += "\nextern \"C\" {\n\n"
s += print_selfie_h_functions(d, pname)
s += print_selfie_h_footer()
return s
def read_inputfile(inputfile):
function_list = []
with open(inputfile,"r") as fdi:
for line in fdi:
if (len(line) > 1):
function_list += [line[:-1]]
return function_list
def main():
parser = argparse.ArgumentParser(
description="Generate list of MPI functions")
parser.add_argument("-p","--proto",action="store_true",
default=False,
help="Print list of MPI functions prototypes")
parser.add_argument("-i","--input",action="store",
default=None,
help="File containing MPI functions list")
parser.add_argument("-n","--name",action="store",
default="mpi",
help="Name of plugin")
parser.add_argument("-o","--output",action="store",
default=None,
help="File where to print "+
"result (If None, print to stdout)")
args = parser.parse_args()
print("")
print(parser.description)
print("")
header = True
# Print proto or not
if(args.proto == True):
header = False
# Input file
if(args.input != None):
mpi_function_list = read_inputfile(args.input)
else:
mpi_function_list = default_mpi_function_list
# Output file
if(args.output != None):
outfile = args.output
else:
outfile = None
pname = args.name
# Parse functions
d = parse_mpi_functions(mpi_function_list)
# Print prototypes
if(header == False):
if(outfile == None):
for proto_name in get_mpi_proto_list(d):
print(proto_name)
else:
with open(outfile,"w") as fd:
for proto_name in get_mpi_proto_list(d):
fd.write(proto_name)
print("File "+outfile+" written")
# Print header
else:
if(outfile == None):
print(print_selfie_h(d,pname))
else:
with open(outfile,"w") as fd:
fd.write(print_selfie_h(d,pname))
print("File "+outfile+" written")
if __name__ == "__main__": main()
|
cea-hpc/selFIe
|
src/parse_mpi.py
|
parse_mpi.py
|
py
| 9,164 |
python
|
en
|
code
| 16 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 260,
"usage_type": "call"
}
] |
38815716976
|
import argparse
import asyncio
import csv
import functools
import gc
import hashlib
import http.client
import importlib
import io
import math
import platform
import re
import socket
import statistics
import sys
import textwrap
import time
import urllib.parse
from typing import Callable, Awaitable, Tuple, Iterable, Optional
_Method = Callable[[str], bytes]
_AMethod = Callable[[str], Awaitable[bytes]]
METHODS = {}
CHECKSUMS = {
10**6 + 128: 'fa82243e0db587af04504f5d3229ff7227f574f8f938edaad8be8e168bc2bc87',
10**7 + 128: '128ceaac08362426bb7271ed6202d11c6830587a415bd7868359725c22d2fe88',
10**9 + 128: 'd699e2c306b897609be6222315366b25137778e18f8634c75b006cef50647978'
}
def method(name: str, requires: Iterable[str] = ()) -> Callable[[_Method], _Method]:
def decorate(func: _Method) -> _Method:
for mod in requires:
try:
importlib.import_module(mod)
except ImportError:
return func
METHODS[name] = func
return func
return decorate
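# Sketch of how an additional transport could be registered (hypothetical example, not part
# of the original benchmark); the decorator only registers the function when every module
# listed in `requires` imports cleanly, so optional dependencies never break the script:
#   @method('somelib', ['somelib'])
#   def load_somelib(url: str) -> bytes:
#       import somelib  # hypothetical third-party HTTP client
#       return somelib.fetch(url)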
def run_async(func: _AMethod) -> _Method:
@functools.wraps(func)
def wrapper(url: str) -> bytes:
loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(func(url))
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
return wrapper
@method('httpclient')
def load_httpclient(url: str) -> bytes:
parts = urllib.parse.urlparse(url)
conn = http.client.HTTPConnection(parts.netloc)
conn.request('GET', parts.path)
resp = conn.getresponse()
return resp.read(resp.length) # type: ignore
@method('httpclient-na')
def load_httpclient_na(url: str) -> bytes:
parts = urllib.parse.urlparse(url)
conn = http.client.HTTPConnection(parts.netloc)
conn.request('GET', parts.path)
resp = conn.getresponse()
return resp.read()
@method('requests', ['requests'])
def load_requests(url: str) -> bytes:
import requests
return requests.get(url).content
@method('requests-c1M', ['requests'])
def load_requests_c1M(url: str) -> bytes:
import requests
old_chunk = requests.models.CONTENT_CHUNK_SIZE
try:
requests.models.CONTENT_CHUNK_SIZE = 1024 * 1024
return requests.get(url).content
finally:
requests.models.CONTENT_CHUNK_SIZE = old_chunk
@method('requests-stream', ['requests'])
def load_requests_stream(url: str) -> bytes:
import requests
with requests.get(url, stream=True) as resp:
return resp.raw.read()
@method('requests-stream-fp-read', ['requests'])
def load_requests_stream_fp_read(url: str) -> bytes:
import requests
with requests.get(url, stream=True) as resp:
return resp.raw._fp.read()
@method('requests-np', ['requests', 'numpy'])
def load_requests_np(url: str) -> bytes:
import requests
import numpy as np
with requests.get(url, stream=True) as resp:
data = np.empty(int(resp.headers['Content-length']), np.uint8)
resp.raw.readinto(memoryview(data))
return data
@method('requests-np-fp', ['requests', 'numpy'])
def load_requests_np_fp(url: str) -> bytes:  # renamed so it no longer shadows load_requests_np above
import requests
import numpy as np
with requests.get(url, stream=True) as resp:
data = np.empty(int(resp.headers['Content-length']), np.uint8)
resp.raw._fp.readinto(memoryview(data))
return data
@method('urllib3', ['urllib3'])
def load_urllib3(url: str) -> bytes:
import urllib3
return urllib3.PoolManager().request('GET', url).data
@method('tornado', ['tornado'])
@run_async
async def load_tornado(url: str) -> bytes:
import tornado.simple_httpclient
client = tornado.simple_httpclient.SimpleAsyncHTTPClient(max_body_size=10**10)
response = await client.fetch(url)
return response.body
@method('aiohttp', ['aiohttp'])
@run_async
async def load_aiohttp(url: str) -> bytes:
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
return await resp.read()
@method('httpx', ['httpx'])
def load_httpx(url: str) -> bytes:
import httpx
return httpx.get(url).content
@method('httpx-async', ['httpx'])
@run_async
async def load_httpx_async(url: str) -> bytes:
import httpx
async with httpx.AsyncClient() as client:
r = await client.get(url)
return r.content
def prepare_socket(url: str) -> Tuple[io.BufferedIOBase, int]:
parts = urllib.parse.urlparse(url)
address = (parts.hostname, parts.port)
sock = socket.socket()
sock.connect(address)
req_header = textwrap.dedent(f'''\
GET {parts.path} HTTP/1.1
Host: {parts.hostname}:{parts.port}
User-Agent: python
Connection: close
Accept: */*
''').replace('\n', '\r\n').encode('ascii')
fh = sock.makefile('rwb')
fh.write(req_header)
fh.flush()
content_length: Optional[int] = None
while True:
line = fh.readline()
if line == b'\r\n':
if content_length is None:
raise RuntimeError('Did not receive Content-Length header')
return fh, content_length # type: ignore
else:
text = line.decode('latin-1').rstrip().lower()
if text.startswith('content-length: '):
content_length = int(text.split(' ')[1])
@method('socket-read')
def load_socket_read(url: str) -> bytes:
fh, content_length = prepare_socket(url)
return fh.read(content_length)
@method('socket-readinto')
def load_socket_readinto(url: str) -> bytes:
fh, content_length = prepare_socket(url)
raw = bytearray(content_length)
n = fh.readinto(raw)
assert n == content_length
return memoryview(raw)[:n]
def validate(data: bytes):
size = len(data)
try:
checksum = CHECKSUMS[size]
except KeyError:
print('No checksum found')
else:
actual_checksum = hashlib.sha256(data).hexdigest()
if actual_checksum != checksum:
print(f'Checksum mismatch ({actual_checksum} != {checksum})')
def measure_method(method: str, args: argparse.Namespace) -> Tuple[float, float, int]:
# Warmup pass
METHODS[method](args.url)
rates = []
size = 0
for i in range(args.passes):
gc.collect()
start = time.monotonic()
data = METHODS[method](args.url)
stop = time.monotonic()
elapsed = stop - start
rates.append(len(data) / elapsed)
if i == 0:
validate(data)
size = len(data)
del data
mean = statistics.mean(rates)
std = statistics.stdev(rates) / math.sqrt(args.passes - 1)
return mean, std, size
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--passes', type=int, default=5)
parser.add_argument('--csv', action='store_true')
parser.add_argument('method')
parser.add_argument('url')
args = parser.parse_args()
if args.method not in METHODS and args.method != 'all':
parser.error('Method must be "all" or one of {}'.format(set(METHODS.keys())))
if args.csv:
writer = csv.DictWriter(sys.stdout, ['Python', 'Method', 'Size', 'mean', 'std'])
writer.writeheader()
match = re.search(r'PyPy \S+', sys.version)
if match:
version = match.group(0)
else:
version = platform.python_version()
if args.method == 'all':
methods = METHODS
else:
methods = [args.method]
for method in methods:
mean, std, size = measure_method(method, args)
if args.csv:
writer.writerow(
{
'Python': version,
'Method': method,
'Size': size,
'mean': mean,
'std': std
}
)
else:
print('{}: {:.1f} ± {:.1f} MB/s'.format(method, mean / 1e6, std / 1e6))
if __name__ == '__main__':
main()
|
ska-sa/pyconza2020-httpbench
|
httpbench.py
|
httpbench.py
|
py
| 8,026 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "typing.Callable",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Awaitable",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "asyncio.new_event_loop",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "asyncio.set_event_loop",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.urlparse",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "http.client.client.HTTPConnection",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "http.client.client",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse.urlparse",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "http.client.client.HTTPConnection",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "http.client.client",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "requests.models",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "requests.models",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "requests.models",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "urllib3.PoolManager",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tornado.simple_httpclient.simple_httpclient.SimpleAsyncHTTPClient",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tornado.simple_httpclient.simple_httpclient",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "tornado.simple_httpclient",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "httpx.get",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "httpx.AsyncClient",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.urlparse",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "io.BufferedIOBase",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "hashlib.sha256",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "time.monotonic",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "time.monotonic",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "statistics.stdev",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "sys.version",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "platform.python_version",
"line_number": 261,
"usage_type": "call"
}
] |
34565307158
|
from random import random, randint
from collections import deque
from math import sin, cos
MAXVAL = 200
MAXINSTR = 12
def new_random_code(length):
return [
(randint(0, MAXINSTR)) if random() > 0.5 else (randint(MAXINSTR + 1, MAXVAL))
for _ in range(length)
]
def point_mutate(code):
code[randint(0, len(code) - 1)] = (
(randint(0, MAXINSTR)) if random() > 0.5 else (randint(MAXINSTR + 1, MAXVAL))
)
def safe_pop(stack, default=0):
try:
return stack.pop()
except IndexError:
return default
def grow_bud(pos, code, n):
offspring = []
history = deque()
ang = 0
stack = deque()
x, y = pos
for instruction in code:
if instruction > 12: # number
stack.append(instruction - 13)
else:
if instruction == 1: # rotCW
history.append((x, y, ang))
ang += safe_pop(stack)
elif instruction == 2: # rotCCW
history.append((x, y, ang))
ang -= safe_pop(stack)
elif instruction == 3: # undo
x, y, ang = safe_pop(history, (x, y, ang))
elif instruction == 4: # move
history.append((x, y, ang))
dist = safe_pop(stack)
x -= sin(ang) * dist
y += cos(ang) * dist
elif instruction == 5: # place
offspring.append((x, y))
elif instruction == 6: # ref n
stack.append(n)
elif instruction == 7: # +
stack.append(safe_pop(stack) + safe_pop(stack))
elif instruction == 8: # -
stack.append(safe_pop(stack) - safe_pop(stack))
elif instruction == 9: # *
stack.append(safe_pop(stack) * safe_pop(stack))
elif instruction == 10: # /
try:
stack.append(safe_pop(stack) / safe_pop(stack, 1))
except ZeroDivisionError:
pass
elif instruction == 11: # ref x
stack.append(x)
elif instruction == 12: # ref y
stack.append(y)
return offspring
def grow_tree(code, iters=3):
bud_positions = [(0, 0)]
branch_positions = []
for n in range(iters):
new_bud_positions = []
for bud_pos in bud_positions:
for new_pos in grow_bud(bud_pos, code, n):
branch_positions.append((*bud_pos, *new_pos))
new_bud_positions.append(new_pos)
bud_positions = new_bud_positions
return bud_positions, branch_positions
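# Illustrative usage (not part of the original module):
#   code = new_random_code(20)
#   buds, branches = grow_tree(code, iters=3)
# `buds` holds the final growth points and `branches` holds (x0, y0, x1, y1) segments
# suitable for drawing the tree.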
|
gwfellows/trees
|
grow.py
|
grow.py
|
py
| 2,644 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.random",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 53,
"usage_type": "call"
}
] |
34218646786
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 12:31:40 2023
@author: tillappel
"""
from arc import *
from IPython.display import display, HTML
import numpy as np
import scipy.constants as sc
import matplotlib.pyplot as plt
def find_largest_c3(n,n_2, l0, j0):
largest_c3_d0 = 0
largest_c3_d1 = 0
largest_i_d0 = 0
largest_i_d1 = 0
largest_j_d0 = 0
largest_j_d1 = 0
largest_transition_d0 = ""
largest_transition_d1 = ""
atom = Rubidium()
# Iterate over combinations of i and j
for i in range(1, 4):
for j in range(1, 4):
# Calculate the dipole matrix element for pi/pi transition with d=0
dsDME_pi_d0 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(j0-1), np.abs(j0), 0)
dpDME_pi_d0 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, j0, j0, 0)
c3_pi_d0 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_pi_d0
* dpDME_pi_d0
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Calculate the dipole matrix element for sigma+/sigma- transition with d=0
dsDME_sigma_d0 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(j0-1), np.abs(j0), -1)
dpDME_sigma_d0 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, j0, j0, 1)
c3_sigma_d0 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_sigma_d0
* dpDME_sigma_d0
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Compare the calculated c3 coefficients with d=0 and update the largest values
if abs(c3_pi_d0) > abs(largest_c3_d0):
largest_c3_d0 = c3_pi_d0
largest_i_d0 = i
largest_j_d0 = j
largest_transition_d0 = "pi/pi"
if abs(c3_sigma_d0) > abs(largest_c3_d0):
largest_c3_d0 = c3_sigma_d0
largest_i_d0 = i
largest_j_d0 = j
largest_transition_d0 = "sigma+/sigma-"
# Calculate the dipole matrix element for pi/pi transition with d=1
dsDME_pi_d1 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(j0-1), np.abs(j0-1), 0)
dpDME_pi_d1 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, j0+1, j0+1, 0)
c3_pi_d1 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_pi_d1
* dpDME_pi_d1
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Calculate the dipole matrix element for sigma+/sigma- transition with d=1
dsDME_sigma_d1 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(-1+j0), np.abs(-1+j0), -1)
dpDME_sigma_d1 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, 1+j0, 1+j0, 1)
c3_sigma_d1 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_sigma_d1
* dpDME_sigma_d1
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Compare the calculated c3 coefficients with d=1 and update the largest values
if abs(c3_pi_d1) > abs(largest_c3_d1):
largest_c3_d1 = c3_pi_d1
largest_i_d1 = i
largest_j_d1 = j
largest_transition_d1 = "pi/pi"
if abs(c3_sigma_d1) > abs(largest_c3_d1):
largest_c3_d1 = c3_sigma_d1
largest_i_d1 = i
largest_j_d1 = j
largest_transition_d1 = "sigma+/sigma-"
return (
largest_i_d0, largest_j_d0, largest_transition_d0, abs(largest_c3_d0) / C_h * 1.0e9,
largest_i_d1, largest_j_d1, largest_transition_d1, abs(largest_c3_d1) / C_h * 1.0e9
)
# Specify the value of n, l0, and j0
n = 59
n_2 = 59
l = 0
j = 0.5
# Find the largest C3 coefficients for d=0 and d=1, and their corresponding i, j, and transition
largest_i_d0, largest_j_d0, largest_transition_d0, largest_c3_d0, largest_i_d1, largest_j_d1, largest_transition_d1, largest_c3_d1 = find_largest_c3(n, n_2, l, j)
# Print the results
print("For d=0:")
print("Largest C3 of Rb(%dP -> %dS/%dD) = %.3f GHz (µm)^3 (i = %d, j = %d, Transition = %s)" % (n, n-largest_i_d0, n+largest_j_d0, largest_c3_d0, largest_i_d0, largest_j_d0, largest_transition_d0))
print("For d=1:")
print("Largest C3 of Rb(%dP -> %dS/%dD) = %.3f GHz (µm)^3 (i = %d, j = %d, Transition = %s)" % (n, n-largest_i_d1, n+largest_j_d1, largest_c3_d1, largest_i_d1, largest_j_d1, largest_transition_d1))
'--------------------------------------------------'
#resonant interaction of groundstate to excited state with opposite parity
atom = Rubidium(cpp_numerov=False)
dme = atom.getDipoleMatrixElement(63, 1, 1/2, 1/2, 40, 0, 1/2, 1/2, +1)
c3_2 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dme
* dme
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
print("C_3 of Rb(63 S -> 61P) = %.3f GHz (mu m)^3 " % (abs(c3_2) / C_h * 1.0e9))
'================================================='
# Evaluation of the Rb nS_1/2 C6 coefficient using perturbation theory (theta=0, phi=0)
l0 = 0
j0 = 0.5
mj0 = 0.5
# Target State
theta = 0
# Polar Angle [0-pi]
phi = 0
# Azimuthal Angle [0-2pi]
dn = 5
# Range of n to consider (n0-dn:n0+dn)
deltaMax = 25e9 # Max pair-state energy difference [Hz]
# Set target-state and extract value
calculation = PairStateInteractions(
Rubidium(), n, l0, j0, n, l0, j0, mj0, mj0
)
C6 = calculation.getC6perturbatively(theta, phi, dn, deltaMax)
print("C6 [%s] = %.2f GHz (mum)^6" % (printStateString(n, l0, j0), C6))
'--------------------------------------------------'
# Define a range of values for n
n_values = range(30, 80)
a_1 = 1 #µm
# Lists to store the C3 and C6 coefficients for d=0 and d=1
c3_values_d0 = []
c3_values_d1 = []
c6_values = []
# Iterate over the values of n
for n in n_values:
# Find the largest C3 coefficients for d=0 and d=1, and their corresponding i, j, and transition
largest_i_d0, largest_j_d0, largest_transition_d0, largest_c3_d0, largest_i_d1, largest_j_d1, largest_transition_d1, largest_c3_d1 = find_largest_c3(n, n_2, l0, j0)
# Append the largest C3 coefficients to the respective c3_values lists
c3_values_d0.append(largest_c3_d0 / a_1**3)
c3_values_d1.append(largest_c3_d1 / a_1**3)
# Calculate the C6 coefficient
calculation = PairStateInteractions(
Rubidium(), n, l0, j0, n, l0, j0, mj0, mj0
)
C6 = calculation.getC6perturbatively(theta, phi, dn, deltaMax)
# Append the C6 coefficient to the c6_values list
c6_values.append(np.abs(C6) / a_1**6)
# Plotting the C3 and C6 coefficients
#plt.plot(n_values, c3_values_d1, label="Largest C3 Coefficient")
#plt.plot(n_values, c3_values_d1, label="C3 Coefficient (d=1)")
#plt.plot(n_values, c6_values, label="C6 Coefficient")
'-------------------'
plt.semilogy(n_values, c3_values_d0, label="Largest C3 Coefficient")  # CURRENTLY: d=0
plt.semilogy(n_values, c6_values, label="C6 Coefficient")
'-------------------'
plt.xlabel("n")
plt.ylabel("C3, C6 [GHz]")
plt.legend(fontsize = "large", loc="upper left")
plt.title("C3 & C6 coefficients of Rb |n,S>")
plt.savefig('log plot S c3,c6.png', dpi=300)
plt.show()
|
tappelnano/RydbergPTG
|
ARC C3_C6 calc.py
|
ARC C3_C6 calc.py
|
py
| 7,589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.abs",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.epsilon_0",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "scipy.constants.physical_constants",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.epsilon_0",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "scipy.constants.physical_constants",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.epsilon_0",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "scipy.constants.physical_constants",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.epsilon_0",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "scipy.constants.physical_constants",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants.epsilon_0",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "scipy.constants.physical_constants",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.semilogy",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.semilogy",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 208,
"usage_type": "name"
}
] |
29381018111
|
import copy
import tempfile
import yaml
import re
import os
import constellation.vault as vault
from constellation.util import ImageReference
def read_yaml(filename):
with open(filename, "r") as f:
dat = yaml.load(f, Loader=yaml.SafeLoader)
dat = parse_env_vars(dat)
return dat
def config_build(path, data, extra=None, options=None):
data = copy.deepcopy(data)
if extra:
data_extra = read_yaml("{}/{}.yml".format(path, extra))
config_check_additional(data_extra)
combine(data, data_extra)
if options:
if isinstance(options, list):
options = collapse(options)
config_check_additional(options)
combine(data, options)
return data
# Utility function for centralising control over pulling information
# out of the configuration.
def config_value(data, path, data_type, is_optional, default=None):
if type(path) is str:
path = [path]
for i, p in enumerate(path):
try:
data = data[p]
if data is None:
raise KeyError()
except KeyError as e:
if is_optional:
return default
e.args = (":".join(path[:(i + 1)]),)
raise e
expected = {"string": str,
"integer": int,
"boolean": bool,
"dict": dict,
"list": list}
if type(data) is not expected[data_type]:
raise ValueError("Expected {} for {}".format(
data_type, ":".join(path)))
return data
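# Illustrative example (assumed data, not from the package):
#   data = {"db": {"port": 5432}}
#   config_value(data, ["db", "port"], "integer", False)               # -> 5432
#   config_value(data, ["db", "host"], "string", True, "localhost")    # -> "localhost"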
# TODO: This can be made better with respect to optional values (e.g.,
# if url is present other keys are required).
def config_vault(data, path):
url = config_string(data, path + ["addr"], True)
auth_method = config_string(data, path + ["auth", "method"], True)
auth_args = config_dict(data, path + ["auth", "args"], True)
return vault.vault_config(url, auth_method, auth_args)
def config_string(data, path, is_optional=False, default=None):
return config_value(data, path, "string", is_optional, default)
def config_integer(data, path, is_optional=False, default=None):
return config_value(data, path, "integer", is_optional, default)
def config_boolean(data, path, is_optional=False, default=None):
return config_value(data, path, "boolean", is_optional, default)
def config_dict(data, path, is_optional=False, default=None):
return config_value(data, path, "dict", is_optional, default)
def config_dict_strict(data, path, keys, is_optional=False, default=None):
d = config_dict(data, path, is_optional)
if not d:
return default
if set(keys) != set(d.keys()):
raise ValueError("Expected keys {} for {}".format(
", ".join(keys), ":".join(path)))
for k, v in d.items():
if type(v) is not str:
raise ValueError("Expected a string for {}".format(
":".join(path + [k])))
return d
def config_list(data, path, is_optional=False, default=None):
return config_value(data, path, "list", is_optional, default)
def config_enum(data, path, values, is_optional=False, default=None):
value = config_string(data, path, is_optional, default)
if value not in values:
raise ValueError("Expected one of [{}] for {}".format(
", ".join(values), ":".join(path)))
return value
def config_image_reference(dat, path, name="name"):
if type(path) is str:
path = [path]
repo = config_string(dat, path + ["repo"])
name = config_string(dat, path + [name])
tag = config_string(dat, path + ["tag"])
return ImageReference(repo, name, tag)
def config_check_additional(options):
if "container_prefix" in options:
raise Exception("'container_prefix' may not be modified")
def combine(base, extra):
"""Combine exactly two dictionaries recursively, modifying the first
    argument in place with the contents of the second"""
for k, v in extra.items():
if k in base and type(base[k]) is dict and v is not None:
combine(base[k], v)
else:
base[k] = v
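# Illustrative example (not part of the original module):
#   base = {"a": {"x": 1}, "b": 2}
#   combine(base, {"a": {"y": 3}})
#   # base is now {"a": {"x": 1, "y": 3}, "b": 2}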
def collapse(options):
"""Combine a list of dictionaries recursively, combining from left to
right so that later dictionaries override values in earlier ones"""
ret = {}
for o in options:
combine(ret, o)
return ret
def parse_env_vars(data):
if isinstance(data, (dict, list)):
for k, v in (data.items() if isinstance(data, dict)
else enumerate(data)):
if isinstance(v, (dict, list)):
data[k] = parse_env_vars(v)
if isinstance(v, str) and re.search("^\\$[0-9A-Z_]+$", v):
data[k] = get_envvar(v[1:])
return data
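# Illustrative example (assumes MY_TOKEN is set in the environment):
#   parse_env_vars({"auth": {"token": "$MY_TOKEN"}})
#   # -> {"auth": {"token": os.environ["MY_TOKEN"]}}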
def get_envvar(name):
try:
return os.environ[name]
except KeyError:
raise KeyError("Did not find env var '{}'".format(
name))
|
reside-ic/constellation
|
constellation/config.py
|
config.py
|
py
| 4,914 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "yaml.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "yaml.SafeLoader",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "constellation.vault.vault_config",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "constellation.vault",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "constellation.util.ImageReference",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 156,
"usage_type": "attribute"
}
] |
37091297903
|
#!/usr/bin/python
from __future__ import print_function
import negspy.coordinates as nc
import sys
import argparse
from itertools import tee
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
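# Illustrative example (not in the original script):
#   list(pairwise([1, 2, 3, 4]))  ->  [(1, 2), (3, 4)]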
def main():
parser = argparse.ArgumentParser(description="""
python chr_pos_to_genome_pos.py -t 1,2:3,4
Convert chromosome,position pairs to genome_positions. Assumes that the
coordinates refer to the hg19 assembly (unless otherwise specified).
Example:
2 NM_000014 chr12 - 9220303 9268825
-> python scripts/chr_pos_to_genome_pos.py -c 3:5,3:6
2 NM_000014 genome - 2115405269 2115453791
--------------------------------
This also works with space-delimited fields:
chr5 56765,56766
->python scripts/chr_pos_to_genome_pos.py -c 1:2
genome 881683465,881683466
""")
parser.add_argument('-a', '--assembly', default='hg19')
parser.add_argument('-s', '--chromsizes-file', default=None)
parser.add_argument('-n', '--new-chrom', default=None)
parser.add_argument('-c', '--columns', default='1,2',
help="Which columns to translate to genome positions. "
"Column pairs should be 1-based and separated by colons")
#parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
if args.chromsizes_file is not None:
chrom_info = nc.get_chrominfo_from_file(args.chromsizes_file)
else:
chrom_info = nc.get_chrominfo(args.assembly)
for line in sys.stdin:
try:
line_output = []
line_parts = line.strip().split()
translated_positions = {}
translated_chroms = {}
for translate_pair in [[int (y) for y in x.split(':')] for x in args.columns.split(',')]:
# go through the pairs of columns that need to be translated to genome position
# assume that the position column is comma separated list of values (although it doesn't
# actually need to be)
chrom,poss = line_parts[translate_pair[0]-1], line_parts[translate_pair[1]-1].strip(",").split(',')
genome_pos = ",".join(map(str,[nc.chr_pos_to_genome_pos( chrom, int(pos), chrom_info) for pos in poss]))
#line_output += [genome_pos]
# note that we've translated these columns and shouldn't include them in the output
translated_positions[translate_pair[1]-1] = genome_pos
translated_chroms[translate_pair[0]-1] = chrom
for i,part in enumerate(line_parts):
if i in translated_chroms:
# replace chromosome identifiers (e.g. 'chr1') with 'genome' to indicate the positions
if args.new_chrom is None:
line_output += ['genome({})'.format(chrom)]
else:
line_output += [args.new_chrom]
elif i in translated_positions:
# this column used to contain a position so we need to replace it with a translated
# position
line_output += [translated_positions[i]]
else:
# if this column didn't contain a translated position output it as is
line_output += [part]
try:
print("\t".join(map(str, line_output)))
except BrokenPipeError:
# Output is probably being run through "head" or something similar
break
except KeyError as ke:
print("KeyError:", ke, line.strip(), file=sys.stderr)
if __name__ == '__main__':
main()
|
pkerpedjiev/negspy
|
scripts/chr_pos_to_genome_pos.py
|
chr_pos_to_genome_pos.py
|
py
| 3,851 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "negspy.coordinates.get_chrominfo_from_file",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "negspy.coordinates",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "negspy.coordinates.get_chrominfo",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "negspy.coordinates",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "sys.stdin",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "negspy.coordinates.chr_pos_to_genome_pos",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "negspy.coordinates",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 99,
"usage_type": "attribute"
}
] |
26040958016
|
from __future__ import annotations
import logging
from dataclasses import dataclass
from pants.backend.python.subsystems.twine import TwineSubsystem
from pants.backend.python.target_types import PythonDistribution
from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
from pants.core.goals.publish import (
PublishFieldSet,
PublishOutputData,
PublishPackages,
PublishProcesses,
PublishRequest,
)
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import CreateDigest, Digest, MergeDigests, Snapshot
from pants.engine.process import InteractiveProcess, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import BoolField, StringSequenceField
from pants.option.global_options import GlobalOptions
from pants.util.strutil import help_text
logger = logging.getLogger(__name__)
class PythonRepositoriesField(StringSequenceField):
alias = "repositories"
help = help_text(
"""
List of URL addresses or Twine repository aliases where to publish the Python package.
Twine is used for publishing Python packages, so the address to any kind of repository
that Twine supports may be used here.
Aliases are prefixed with `@` to refer to a config section in your Twine configuration,
such as a `.pypirc` file. Use `@pypi` to upload to the public PyPi repository, which is
the default when using Twine directly.
"""
)
# Twine uploads to 'pypi' by default, but we don't set default to ["@pypi"] here to make it
# explicit in the BUILD file when a package is meant for public distribution.
class SkipTwineUploadField(BoolField):
alias = "skip_twine"
default = False
help = "If true, don't publish this target's packages using Twine."
class PublishPythonPackageRequest(PublishRequest):
pass
@dataclass(frozen=True)
class PublishPythonPackageFieldSet(PublishFieldSet):
publish_request_type = PublishPythonPackageRequest
required_fields = (PythonRepositoriesField,)
repositories: PythonRepositoriesField
skip_twine: SkipTwineUploadField
def get_output_data(self) -> PublishOutputData:
return PublishOutputData(
{
"publisher": "twine",
**super().get_output_data(),
}
)
# I'd rather opt out early here, so we don't build unnecessarily, however the error feedback is
# misleading and not very helpful in that case.
#
# @classmethod
# def opt_out(cls, tgt: Target) -> bool:
# return not tgt[PythonRepositoriesField].value
def twine_upload_args(
twine_subsystem: TwineSubsystem,
config_files: ConfigFiles,
repo: str,
dists: tuple[str, ...],
ca_cert: Snapshot | None,
) -> tuple[str, ...]:
args = ["upload", "--non-interactive"]
if ca_cert and ca_cert.files:
args.append(f"--cert={ca_cert.files[0]}")
if config_files.snapshot.files:
args.append(f"--config-file={config_files.snapshot.files[0]}")
args.extend(twine_subsystem.args)
if repo.startswith("@"):
# Named repository from the config file.
args.append(f"--repository={repo[1:]}")
else:
args.append(f"--repository-url={repo}")
args.extend(dists)
return tuple(args)
def twine_env_suffix(repo: str) -> str:
return f"_{repo[1:]}".replace("-", "_").upper() if repo.startswith("@") else ""
def twine_env_request(repo: str) -> EnvironmentVarsRequest:
suffix = twine_env_suffix(repo)
env_vars = [
"TWINE_USERNAME",
"TWINE_PASSWORD",
"TWINE_REPOSITORY_URL",
]
req = EnvironmentVarsRequest(env_vars + [f"{var}{suffix}" for var in env_vars])
return req
def twine_env(env: EnvironmentVars, repo: str) -> EnvironmentVars:
suffix = twine_env_suffix(repo)
return EnvironmentVars(
{key.rsplit(suffix, maxsplit=1)[0] if suffix else key: value for key, value in env.items()}
)
@rule
async def twine_upload(
request: PublishPythonPackageRequest,
twine_subsystem: TwineSubsystem,
global_options: GlobalOptions,
) -> PublishProcesses:
dists = tuple(
artifact.relpath
for pkg in request.packages
for artifact in pkg.artifacts
if artifact.relpath
)
if twine_subsystem.skip or not dists:
return PublishProcesses()
# Too verbose to provide feedback as to why some packages were skipped?
skip = None
if request.field_set.skip_twine.value:
skip = f"(by `{request.field_set.skip_twine.alias}` on {request.field_set.address})"
elif not request.field_set.repositories.value:
# I'd rather have used the opt_out mechanism on the field set, but that gives no hint as to
# why the target was not applicable..
skip = f"(no `{request.field_set.repositories.alias}` specified for {request.field_set.address})"
if skip:
return PublishProcesses(
[
PublishPackages(
names=dists,
description=skip,
),
]
)
twine_pex, packages_digest, config_files = await MultiGet(
Get(VenvPex, PexRequest, twine_subsystem.to_pex_request()),
Get(Digest, MergeDigests(pkg.digest for pkg in request.packages)),
Get(ConfigFiles, ConfigFilesRequest, twine_subsystem.config_request()),
)
ca_cert_request = twine_subsystem.ca_certs_digest_request(global_options.ca_certs_path)
ca_cert = await Get(Snapshot, CreateDigest, ca_cert_request) if ca_cert_request else None
ca_cert_digest = (ca_cert.digest,) if ca_cert else ()
input_digest = await Get(
Digest, MergeDigests((packages_digest, config_files.snapshot.digest, *ca_cert_digest))
)
pex_proc_requests = []
twine_envs = await MultiGet(
Get(EnvironmentVars, EnvironmentVarsRequest, twine_env_request(repo))
for repo in request.field_set.repositories.value
)
for repo, env in zip(request.field_set.repositories.value, twine_envs):
pex_proc_requests.append(
VenvPexProcess(
twine_pex,
argv=twine_upload_args(twine_subsystem, config_files, repo, dists, ca_cert),
input_digest=input_digest,
extra_env=twine_env(env, repo),
description=repo,
)
)
processes = await MultiGet(
Get(Process, VenvPexProcess, request) for request in pex_proc_requests
)
return PublishProcesses(
PublishPackages(
names=dists,
process=InteractiveProcess.from_process(process),
description=process.description,
data=PublishOutputData({"repository": process.description}),
)
for process in processes
)
def rules():
return (
*collect_rules(),
*PublishPythonPackageFieldSet.rules(),
PythonDistribution.register_plugin_field(PythonRepositoriesField),
PythonDistribution.register_plugin_field(SkipTwineUploadField),
)
|
pantsbuild/pants
|
src/python/pants/backend/python/goals/publish.py
|
publish.py
|
py
| 7,218 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.BoolField",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.publish.PublishRequest",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.publish.PublishFieldSet",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.publish.PublishOutputData",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.publish.PublishOutputData",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.subsystems.twine.TwineSubsystem",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "pants.core.util_rules.config_files.ConfigFiles",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "pants.engine.env_vars.EnvironmentVarsRequest",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pants.engine.env_vars.EnvironmentVarsRequest",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "pants.engine.env_vars.EnvironmentVars",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "pants.engine.env_vars.EnvironmentVars",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.subsystems.twine.TwineSubsystem",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "pants.option.global_options.GlobalOptions",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.publish.PublishProcesses",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.publish.PublishProcesses",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.publish.PublishPackages",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.MultiGet",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.util_rules.pex.VenvPex",
"line_number": 166,
"usage_type": "argument"
},
{
"api_name": "pants.backend.python.util_rules.pex.PexRequest",
"line_number": 166,
"usage_type": "argument"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 167,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.config_files.ConfigFiles",
"line_number": 168,
"usage_type": "argument"
},
{
"api_name": "pants.core.util_rules.config_files.ConfigFilesRequest",
"line_number": 168,
"usage_type": "argument"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Snapshot",
"line_number": 172,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.CreateDigest",
"line_number": 172,
"usage_type": "argument"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 176,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.MultiGet",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pants.engine.env_vars.EnvironmentVars",
"line_number": 180,
"usage_type": "argument"
},
{
"api_name": "pants.engine.env_vars.EnvironmentVarsRequest",
"line_number": 180,
"usage_type": "argument"
},
{
"api_name": "pants.backend.python.util_rules.pex.VenvPexProcess",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.MultiGet",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.Process",
"line_number": 196,
"usage_type": "argument"
},
{
"api_name": "pants.backend.python.util_rules.pex.VenvPexProcess",
"line_number": 196,
"usage_type": "argument"
},
{
"api_name": "pants.core.goals.publish.PublishProcesses",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.publish.PublishPackages",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.InteractiveProcess.from_process",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.InteractiveProcess",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.publish.PublishOutputData",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.publish.PublishProcesses",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.collect_rules",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.target_types.PythonDistribution.register_plugin_field",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.target_types.PythonDistribution",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.target_types.PythonDistribution.register_plugin_field",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.target_types.PythonDistribution",
"line_number": 215,
"usage_type": "name"
}
] |
27321032293
|
"""
Kela Purchase data preprocessing
Reads Kela Purchase data, applies the preprocessing steps below and writes the result to files split by year.
- Convert column names to uppercase
- Rename HETU to FINREGISTRYID
- Format dates to YYYY-MM-DD
- Drop duplicate rows
- Fix data types
Input files:
- For years 1995-2019 (split by year): 175_522_2020_LAAKEOSTOT_<year>.csv.finreg_IDs (25 files)
- For years 2020-2021 (split by month): 81_522_2022_LAAKEOSTOT_<year><month>.csv.finreg_IDs (24 files)
Output files:
- purchase_<year>.csv (27 files)
- purchase_<year>.feather (27 files)
"""
import pandas as pd
import logging
from datetime import datetime
from finregistry_data.config import KELA_PURCHASE_INPUT_DIR, KELA_PURCHASE_OUTPUT_DIR
from finregistry_data.utils import write_data
def preprocess_purchases(path):
"""
Preprocess Kela drug purchases input file
Args:
path (str): Path to the input file
Returns:
Preprocessed dataframe
"""
df = pd.read_csv(path, sep=";", dtype=str)
# Convert column names to uppercase
df.columns = df.columns.str.upper()
# Format dates
for date_col in ["OSTOPV", "RKPV"]:
df[date_col] = pd.to_datetime(df[date_col], errors="coerce").dt.date
# Rename HETU to FINREGISTRYID
df = df.rename(columns={"HETU": "FINREGISTRYID"})
# Drop duplicates
df = df.drop_duplicates().reset_index(drop=True)
# Fix data types
dtypes = {
"PLKM": float,
"KUST_EUR": float,
"KORV_EUR": float,
"KAKORV_EUR": float,
}
df = df.astype(dtypes)
return df
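# A minimal sketch of the expected result (column names as used above; the values
# shown are invented for illustration only):
#   df = preprocess_purchases(KELA_PURCHASE_INPUT_DIR / "175_522_2020_LAAKEOSTOT_1995.csv.finreg_IDs")
#   df[["FINREGISTRYID", "OSTOPV", "PLKM"]].head(1)
#   ->   FINREGISTRYID      OSTOPV  PLKM
#   0        FR0000001  1995-01-02   1.0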
def convert_csv_to_feather(path, output_name):
"""
Convert a preprocessed KELA Purchases file into a feather file
Args:
path (str): path to the preprocessed file
output_name (str): name of the output file without the file extension
"""
dtypes = {
"FINREGISTRYID": str,
"ATC": str,
"PLKM": float,
"KUST_EUR": float,
"KORV_EUR": float,
"KAKORV_EUR": float,
"RPK": str,
"LAJI": str,
"VNRO": str,
"SAIR": str,
"RGTNO": str,
"ASKU": str,
"SHP_NRO": str,
"TILASTOVUOSI": str,
"ANJA": str,
}
date_cols = ["OSTOPV", "RKPV"]
df = pd.read_csv(path, dtype=dtypes, parse_dates=date_cols)
write_data(df, KELA_PURCHASE_OUTPUT_DIR, output_name, "feather")
if __name__ == "__main__":
# Set logging level to INFO
logging.basicConfig(level=logging.INFO)
# Loop through files split by year
for year in range(1995, 2020):
filename = "175_522_2020_LAAKEOSTOT_" + str(year) + ".csv.finreg_IDs"
input_path = KELA_PURCHASE_INPUT_DIR / filename
logging.info("Processing file " + filename)
df = preprocess_purchases(input_path)
write_data(df, KELA_PURCHASE_OUTPUT_DIR, "purchases_" + str(year), "csv")
write_data(df, KELA_PURCHASE_OUTPUT_DIR, "purchases_" + str(year), "feather")
# Loop through files split by month
today = datetime.today().strftime("%Y-%m-%d")
for year in range(2020, 2022):
        for month in range(1, 13):  # months 1-12
filename = (
"81_522_2022_LAAKEOSTOT_"
+ str(year)
+ str(month).zfill(2)
+ ".csv.finreg_IDs"
)
input_path = KELA_PURCHASE_INPUT_DIR / filename
logging.info("Processing file " + filename)
df = preprocess_purchases(input_path)
header = True if month == 1 else False
output_path = KELA_PURCHASE_OUTPUT_DIR / (
"purchases_" + str(year) + "_" + today + ".csv"
)
df.to_csv(output_path, mode="a", header=header, index=False)
        convert_csv_to_feather(output_path, "purchases_" + str(year))
|
dsgelab/finregistry-data
|
finregistry_data/registries/kela_purchase.py
|
kela_purchase.py
|
py
| 3,850 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "finregistry_data.utils.write_data",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_OUTPUT_DIR",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "logging.basicConfig",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_INPUT_DIR",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "finregistry_data.utils.write_data",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_OUTPUT_DIR",
"line_number": 105,
"usage_type": "argument"
},
{
"api_name": "finregistry_data.utils.write_data",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_OUTPUT_DIR",
"line_number": 106,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.today",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_INPUT_DIR",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_OUTPUT_DIR",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "finregistry_data.config.KELA_PURCHASE_OUTPUT_DIR",
"line_number": 126,
"usage_type": "argument"
}
] |
20840870665
|
"""
Utility functions.
"""
import os
from time import perf_counter
import numpy as np
from sklearn.metrics import (
brier_score_loss,
matthews_corrcoef,
roc_curve,
precision_recall_curve,
auc,
cohen_kappa_score,
classification_report,
# confusion_matrix,
)
from sklearn.metrics import recall_score, precision_score
import shap
import matplotlib.pyplot as plt
from functools import wraps
def get_metrics(model, x_val, y_val):
"""
    Compute simple metrics (MSE and MAPE).
"""
y_pred = model.predict(x_val)
mse = np.mean((y_val - y_pred)**2)
mask = y_val > 0
mape = (np.fabs(y_val - y_pred) / y_val)[mask].mean()
return y_pred, mse, mape
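# A worked example of the metric definitions above (illustrative numbers):
# with y_val = [1, 2, 4] and y_pred = [1, 2, 2]:
#   mse  = mean([0, 0, 4])        ~= 1.33
#   mape = mean([0/1, 0/2, 2/4])  ~= 0.17   (only rows with y_val > 0 are used)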
def shap_analysis(booster, data, name_f):
# fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(21, 12))
shap_values = shap.TreeExplainer(booster).shap_values(data)
fig = plt.figure(figsize=(40, 40))
shap.summary_plot(shap_values, data, show=False, max_display=len(data.columns))
fig.savefig(name_f, bbox_inches="tight")
|
Lenin22/ML-Demo
|
utils.py
|
utils.py
|
py
| 1,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.mean",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.fabs",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "shap.TreeExplainer",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "shap.summary_plot",
"line_number": 41,
"usage_type": "call"
}
] |
74190845628
|
# -*- coding: utf-8 -*-
__author__ = "ALEX-CHUN-YU ([email protected])"
from word2vec import Word2Vec as w2v
import MySQLdb
import numpy as np
from bert_embedding import BertEmbedding
import codecs
import re
# Entity to Vector
class E2V_BERT:
# init
def __init__(self):
self.db = MySQLdb.connect(host = "127.0.0.1", user = "root", passwd = "wmmkscsie", db = "recommender_system", charset = "utf8")
self.cursor = self.db.cursor()
self.articles_ner_tag = []
self.movies_ner_tag = []
        # Build an entity-to-vector dictionary for use in later experiments
self.entity_and_vector = []
# main function
def e2v_bert(self):
        # Produce vectors via BERT embeddings and store the resulting relationship and scenario features
self.load_data()
self.extract_vector_and_save_vector(dimension = 768)
# self.produce_entity_vector_table()
# load data
def load_data(self):
# articles ner 221269
self.cursor.execute("SELECT a.id, a.content_ner_tag FROM articles_ner as a, articles as b Where a.id = b.id and a.id >= 0 and a.id <= 0 and b.relationship_type != ''")
self.articles_ner_tag = self.cursor.fetchall()
# movies ner 3722
self.cursor.execute("SELECT a.id, a.storyline_ner_tag FROM movies_ner as a, movies as b Where a.id = b.id and a.id >= 1 and a.id <= 3722 and b.scenario_type != ''")
self.movies_ner_tag = self.cursor.fetchall()
    # Get vectors (using BERT) and build/store the relationship feature and scenario feature
def extract_vector_and_save_vector(self, dimension):
bert_embedding = BertEmbedding(model = 'bert_12_768_12', dataset_name='wiki_cn', max_seq_length = 50)
# self.articles_ner_tag = [[1, "人:none 失戀:em 悲觀:em 房間:lo 感到:none 難過:em @ 戀情:em 感到:none 傷心:em 值得:none 人:none 人:none 失戀:em@後會:none 傷害自己:ev 事業:none 失敗:ev 事情:none 失敗:em 忘:ev 走:ev"]]
# self.movies_ner_tag = [[1, "戀情:ev 感到:none "], [2, "人:none 失戀:em 悲觀:em 房間:lo 感到:none 難過:em @ 戀情:ev 感到:none "]]
for article_ner_tag in self.articles_ner_tag:
article_id = article_ner_tag[0]
sentences_ner_tag = article_ner_tag[1]
print("article_id:", end = '')
print(article_id)
relationship_e2v_bert = []
scenario_e2v_bert = []
sentences = []
entity_type_position_length_in_sentences = []
for sentence_ner_tag in sentences_ner_tag.split('@'):
                if sentence_ner_tag != "":
sentence = ""
entity_type_position_length_in_sentence = []
for term_ner_tag in sentence_ner_tag.split(' '):
if " " not in term_ner_tag and term_ner_tag != "":
# print(term_ner_tag)
term_ner_tag = term_ner_tag.split(':')
term = term_ner_tag[0]
tag = term_ner_tag[1]
position = int(term_ner_tag[2])
length = int(term_ner_tag[3])
entity_type_position_length_in_sentence.append([term, tag, position, length])
sentence += term
sentences.append(sentence)
# print(len(entity_type_position_length_in_sentence))
entity_type_position_length_in_sentences.append(entity_type_position_length_in_sentence)
print(sentences)
print(entity_type_position_length_in_sentences)
results = bert_embedding(sentences)
print("文章長度:", end = "")
print(len(results))
po_vector = np.zeros(dimension)
em_vector = np.zeros(dimension)
ev_vector = np.zeros(dimension)
lo_vector = np.zeros(dimension)
ti_vector = np.zeros(dimension)
po_count = 0
em_count = 0
ev_count = 0
lo_count = 0
ti_count = 0
for i, result in enumerate(results):
print(sentences[i])
print(entity_type_position_length_in_sentences[i])
print(result[0])
for i, entity in enumerate(entity_type_position_length_in_sentences[i]):
entity_vector = np.zeros(dimension)
try:
for i in range(entity[3]):
entity_vector += result[1][entity[2] + 1 + i]
except:
print("some illegal characters")
break
if entity[1] == 'none':
pass
elif entity[1] == 'po':
po_vector += entity_vector
po_count += 1
elif entity[1] == 'em':
em_vector += entity_vector
em_count += 1
elif entity[1] == 'ev':
ev_vector += entity_vector
ev_count += 1
elif entity[1] == 'lo':
lo_vector += entity_vector
lo_count += 1
elif entity[1] == 'ti':
ti_vector += entity_vector
ti_count += 1
                    # Build the BERT entity-to-vector table
# self.entity_and_vector.append([entity[0], entity_vector])
print(po_vector[:5])
print(em_vector[:5])
print(ev_vector[:5])
print(lo_vector[:5])
print(ti_vector[:5])
# print(po_count)
# print(em_count)
# print(ev_count)
# print(lo_count)
# print(ti_count)
if po_count == 0:
po_count = 1
if em_count == 0:
em_count = 1
if ev_count == 0:
ev_count = 1
if lo_count == 0:
lo_count = 1
if ti_count == 0:
ti_count = 1
relationship_e2v_bert = np.append(relationship_e2v_bert, po_vector/po_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, em_vector/em_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, ev_vector/ev_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, lo_vector/lo_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, ti_vector/ti_count)
scenario_e2v_bert = np.append(scenario_e2v_bert, em_vector/em_count)
scenario_e2v_bert = np.append(scenario_e2v_bert, ev_vector/ev_count)
print(relationship_e2v_bert.shape)
print(scenario_e2v_bert.shape)
# print(relationship_e2v_bert[1536])
# print(relationship_e2v_bert[2304])
sql = "UPDATE articles_vector SET relationship_e2v_bert=%s, scenario_e2v_bert=%s WHERE id=%s"
val = (str(list(relationship_e2v_bert)), str(list(scenario_e2v_bert)), article_id)
self.cursor.execute(sql, val)
self.db.commit()
print("="*10)
for movie_ner_tag in self.movies_ner_tag:
movie_id = movie_ner_tag[0]
sentences_ner_tag = movie_ner_tag[1]
print("movie_id:", end = '')
print(movie_id)
scenario_e2v_bert = []
sentences = []
entity_type_position_length_in_sentences = []
for sentence_ner_tag in sentences_ner_tag.split('@'):
if sentence_ner_tag != "":
sentence = ""
entity_type_position_length_in_sentence = []
for term_ner_tag in sentence_ner_tag.split(' '):
if " " not in term_ner_tag and term_ner_tag != "":
term_ner_tag = term_ner_tag.split(':')
term = term_ner_tag[0]
tag = term_ner_tag[1]
position = int(term_ner_tag[2])
length = int(term_ner_tag[3])
entity_type_position_length_in_sentence.append([term, tag, position, length])
sentence += term
sentences.append(sentence)
# print(len(entity_type_position_length_in_sentence))
entity_type_position_length_in_sentences.append(entity_type_position_length_in_sentence)
print(sentences)
print(entity_type_position_length_in_sentences)
results = bert_embedding(sentences)
print("故事情節長度:", end = "")
print(len(results))
em_vector = np.zeros(dimension)
ev_vector = np.zeros(dimension)
em_count = 0
ev_count = 0
for i, result in enumerate(results):
print(sentences[i])
print(entity_type_position_length_in_sentences[i])
print(result[0])
for i, entity in enumerate(entity_type_position_length_in_sentences[i]):
entity_vector = np.zeros(dimension)
try:
for i in range(entity[3]):
entity_vector += result[1][entity[2] + 1 + i]
except:
print("some illegal characters")
break
if entity[1] == 'none':
pass
elif entity[1] == 'po':
pass
elif entity[1] == 'em':
em_vector += entity_vector
em_count += 1
elif entity[1] == 'ev':
ev_vector += entity_vector
ev_count += 1
elif entity[1] == 'lo':
pass
elif entity[1] == 'ti':
pass
# self.entity_and_vector.append([entity[0], entity_vector])
print(em_vector[:5])
print(ev_vector[:5])
# print(em_count)
# print(ev_count)
if em_count == 0:
em_count = 1
if ev_count == 0:
ev_count = 1
scenario_e2v_bert = np.append(scenario_e2v_bert, em_vector/em_count)
scenario_e2v_bert = np.append(scenario_e2v_bert, ev_vector/ev_count)
print(scenario_e2v_bert.shape)
sql = "UPDATE movies_vector SET scenario_e2v_bert=%s WHERE id=%s"
val = (str(list(scenario_e2v_bert)), movie_id)
self.cursor.execute(sql, val)
self.db.commit()
print("="*10)
    # Produce the entity-to-vector table (each entity appears only once)
def produce_entity_vector_table(self):
entity_dict = {}
entity_count = {}
mode = "w"
file = "e2v_bert_table.txt"
with codecs.open(file, mode = mode, encoding = 'utf8') as vector_table:
for entity_vector in self.entity_and_vector:
if entity_vector[0] not in entity_dict.keys():
entity_dict[entity_vector[0]] = entity_vector[1]
entity_count[entity_vector[0]] = 1
else:
entity_dict[entity_vector[0]] = entity_dict[entity_vector[0]] + entity_vector[1]
entity_count[entity_vector[0]] = entity_count[entity_vector[0]] + 1
for entity, count in entity_count.items():
entity_dict[entity] = entity_dict[entity]/count
for entity, vector in entity_dict.items():
vector_table.write(entity + ":")
vector_table.write(str(list(vector)))
vector_table.write("\n")
vector_table.close()
if __name__ == "__main__":
e2v_bert = E2V_BERT()
e2v_bert.e2v_bert()
|
Alex-CHUN-YU/Recommender-System
|
main_embedding/e2v_bert.py
|
e2v_bert.py
|
py
| 9,420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "MySQLdb.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bert_embedding.BertEmbedding",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 228,
"usage_type": "call"
}
] |
26625473616
|
"""Pluggable newsletter handling."""
from django import forms
from django.utils.translation import ugettext_lazy as _
from livesettings import config_value
from satchmo_store.accounts.signals import satchmo_registration
from satchmo_store.contact.signals import satchmo_contact_view
from satchmo_utils import load_module
from signals_ahoy.signals import form_initialdata
import logging
import signals
log = logging.getLogger('newsletter')
def get_newsletter_module():
try:
modulename = config_value('NEWSLETTER', 'MODULE')
except AttributeError:
modulename = 'satchmo_ext.newsletter.ignore'
return load_module(modulename)
def is_subscribed(contact):
if not contact:
return False
return get_newsletter_module().is_subscribed(contact)
def update_subscription(contact, subscribed, attributes={}):
current = is_subscribed(contact)
log.debug("Updating subscription status from %s to %s for %s", current, subscribed, contact)
result = get_newsletter_module().update_contact(contact, subscribed, attributes=attributes)
signals.newsletter_subscription_updated.send(contact,
old_state=current, new_state=subscribed, contact=contact, attributes=attributes)
return result
def update_subscription_listener(contact=None, subscribed=False, **kwargs):
if contact:
update_subscription(contact, subscribed)
def populate_form_initialdata_listener(contact=None, initial = {}, **kwargs):
if contact:
current_subscriber = is_subscribed(contact)
else:
current_subscriber = False
initial['newsletter'] = current_subscriber
def view_user_data_listener(contact=None, contact_dict=None, **kwargs):
module = config_value('NEWSLETTER', 'MODULE')
if module not in ('', 'satchmo_ext.newsletter.ignore'):
contact_dict['show_newsletter'] = True
contact_dict['newsletter'] = is_subscribed(contact)
else:
contact_dict['show_newsletter'] = False
satchmo_contact_view.connect(view_user_data_listener, sender=None)
satchmo_registration.connect(update_subscription_listener, sender=None)
form_initialdata.connect(populate_form_initialdata_listener, sender='RegistrationForm')
|
dokterbob/satchmo
|
satchmo/apps/satchmo_ext/newsletter/__init__.py
|
__init__.py
|
py
| 2,206 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "livesettings.config_value",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "satchmo_utils.load_module",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "signals.newsletter_subscription_updated.send",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "signals.newsletter_subscription_updated",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "livesettings.config_value",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "satchmo_store.contact.signals.satchmo_contact_view.connect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "satchmo_store.contact.signals.satchmo_contact_view",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "satchmo_store.accounts.signals.satchmo_registration.connect",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "satchmo_store.accounts.signals.satchmo_registration",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "signals_ahoy.signals.form_initialdata.connect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "signals_ahoy.signals.form_initialdata",
"line_number": 58,
"usage_type": "name"
}
] |
16370593696
|
import re
import sys
from collections import defaultdict
def get_num_overlapping_points(lines):
counts = defaultdict(lambda: 0)
for (x1, y1), (x2, y2) in lines:
if x1 == x2:
            # vertical line (x is constant)
y11, y22 = (y1, y2) if y2 > y1 else (y2, y1)
for y in range(y11, y22 + 1):
counts[(x1, y)] += 1
elif y1 == y2:
            # horizontal line (y is constant)
x11, x22 = (x1, x2) if x2 > x1 else (x2, x1)
for x in range(x11, x22 + 1):
counts[(x, y1)] += 1
elif x1 - x2 == y1 - y2 or x1 - x2 == -(y1 - y2):
# diagonal
xs = (
range(x1, x2 + 1) if x2 > x1 else range(x1, x2 - 1, -1)
)
ys = (
range(y1, y2 + 1) if y2 > y1 else range(y1, y2 - 1, -1)
)
for (x, y) in zip(xs, ys):
counts[(x, y)] += 1
return sum(v > 1 for v in counts.values())
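# A tiny worked example (made-up segments): a horizontal and a vertical segment that
# cross at (1, 1) give exactly one point covered more than once, so
# get_num_overlapping_points([((0, 1), (2, 1)), ((1, 0), (1, 2))]) == 1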
def main(input_file):
with open(input_file, 'r') as f:
content = f.read()
lines = (map(int, nums)
for nums in re.findall(r'(\d+),(\d+) -> (\d+),(\d+)', content))
lines = [((x1, y1), (x2, y2))
for x1, y1, x2, y2 in lines]
val1 = get_num_overlapping_points(
[((x1, y1), (x2, y2)) for ((x1, y1), (x2, y2)) in lines
if x1 == x2 or y1 == y2]
)
print('Part 1:', val1)
val2 = get_num_overlapping_points(lines)
print('Part 2:', val2)
if __name__ == '__main__':
input_file = sys.argv[-1] if len(sys.argv) > 1 else 'input.txt'
main(input_file)
|
sjsawyer/aoc-2021
|
q05/q05.py
|
q05.py
|
py
| 1,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 53,
"usage_type": "attribute"
}
] |
3326207971
|
import json
import logging
from datetime import datetime
import requests
from system import settings
from system.constants import MODACTION_WH, USELESS_DETAILS
webhook = settings.DISCORD_MODLOG_WEBHOOK
bots = ['AutoModerator', 'FloodgatesBot']
log = logging.getLogger('worker.dsws')
def make_embed(entry):
ts = datetime.fromtimestamp(entry['created_utc']).isoformat().replace('T', ' ')
mod = ('🤖 ' if entry['mod'] in bots else '') + entry['mod']
embed = {
'fields': [{'name': 'Mod', 'value': mod, 'inline': True}],
'footer': {'text': f'Fecha: {ts}'}
}
if entry.get('target_author', ''):
embed['fields'].append({'name': 'Usuario', 'value': entry['target_author'], 'inline': True})
if entry.get('target_permalink', ''):
embed['description'] = f'**Link**: https://www.reddit.com{entry["target_permalink"]}'
if entry.get('details', ''):
details = entry['details']
for k, v in USELESS_DETAILS.items():
if k == details:
details = v
if details:
embed['fields'].append({'name': 'Detalles', 'value': entry['details'], 'inline': True})
if entry.get('target_title', ''):
embed['fields'].append({
'name': 'Título del post',
'value': entry['target_title']
})
if entry.get('target_body', ''):
content_type = 'post' if entry.get('target_title', '') else 'comentario'
body_field = {
'name': f'Contenido del {content_type}',
'value': entry['target_body'][:1000]
}
if len(entry['target_body']) > 1000:
body_field['value'] += '…'
embed['fields'].append(body_field)
return embed
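# A minimal sketch of the modlog entry shape this expects (field names taken from the
# lookups above; values are illustrative only):
# make_embed({"created_utc": 1609459200, "mod": "AutoModerator",
#             "target_author": "someuser", "target_permalink": "/r/example/comments/abc/",
#             "details": "", "target_title": "", "target_body": ""})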
def send(entries):
if not webhook:
return
for entry in entries[:5]:
if entry['action'] not in MODACTION_WH:
return
try:
action_description = MODACTION_WH[entry['action']]
payload = {
'content': f'📝 **{action_description}** por **{entry["mod"]}**',
'embeds': [make_embed(entry)]
}
log.debug('Entry: %s', entry)
log.debug('Enviando mensaje webhook: %s', json.dumps(payload))
resp = requests.post(webhook, json=payload)
if resp.status_code >= 400:
log.error('Error enviando mensaje, estado %i: %s', resp.status_code, resp.text)
except Exception as e:
log.exception(e)
|
rchile/mod-toolbox
|
toolbox/discord_ws.py
|
discord_ws.py
|
py
| 2,511 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "system.settings.DISCORD_MODLOG_WEBHOOK",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "system.settings",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "system.constants.USELESS_DETAILS.items",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "system.constants.USELESS_DETAILS",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "system.constants.MODACTION_WH",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "system.constants.MODACTION_WH",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 73,
"usage_type": "call"
}
] |
41313263665
|
import appdaemon.plugins.hass.hassapi as hass
import time
from babel.numbers import format_number, format_decimal
class wasserdroger(hass.Hass):
def initialize(self):
self.listen_state(self.inputhandler, self.args["trigger"], old="off", new="on")
self.listen_state(self.inputhandler, self.args["trigger"], old="on", new="off")
def inputhandler(self, entity, attribute, old, new, kwargs):
action = self.get_state(self.args["trigger"])
self.log(action)
kwh = self.get_state(self.args["kwhsensor"])
timestamp = str(round(time.time()))
appliance = self.args["appliance"]
path = '/conf/'+appliance+'.csv'
f = open(path,'a')
#self.log(timestamp+";"+str(format_decimal(kwh, locale='de'))+";"+appliance+" "+self.action+"\n")
self.log("action schrijf:")
f.write(timestamp+";"+str(format_decimal(kwh, locale='de'))+";"+appliance+" "+action+"\n")
f.close()
payload = '{ "timestamp" :'+str(format_decimal(kwh, locale='de'))+',"appliance":'+appliance+',"action":'+action+'}'
topic = "zolder/"+appliance+"/status"
self.call_service("mqtt/publish", topic=topic, payload=payload)
|
balk77/Home-AssistantConfig
|
appdaemon4/conf/apps/wasserdroger.py
|
wasserdroger.py
|
py
| 1,208 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "appdaemon.plugins.hass.hassapi.Hass",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "appdaemon.plugins.hass.hassapi",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "babel.numbers.format_decimal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "babel.numbers.format_decimal",
"line_number": 27,
"usage_type": "call"
}
] |
32188046557
|
# Programmers - brute-force search (피로도 / "Fatigue")
# Generate every possible dungeon order using permutations.
# Then walk each generated order with a for loop and keep the order whose
# result (number of dungeons cleared) is the largest.
from itertools import permutations
def solution(k, dungeons):
answer = 0
a = []
for i in range(len(dungeons)):
a.append(i)
permute = permutations(a,len(dungeons))
for j in permute:
k_number = k
result = 0
for K in j:
if dungeons[K][0] <= k_number:
k_number -= dungeons[K][1]
result += 1
else:
continue
if result >= answer:
answer = result
return answer
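# Sample case (as commonly quoted for this Programmers problem; treat as illustrative):
# solution(80, [[80, 20], [50, 40], [30, 10]]) == 3
# Best order: dungeon 0 (need 80, spend 20), then 2 (need 30, spend 10), then 1 (need 50, spend 40).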
|
kcw0331/python-for-coding-test
|
programmers-coding/피로도.py
|
피로도.py
|
py
| 794 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.permutations",
"line_number": 11,
"usage_type": "call"
}
] |
29686629055
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework import status
from drf_yasg.utils import swagger_auto_schema
from ..models import (
Appeal,
)
from ..serializers import (
AppealSerializer,
)
class AppealCreate(APIView):
@swagger_auto_schema(operation_description="Create Appeal", request_body=AppealSerializer)
def post(self, request:Request):
data = request.data
serializer = AppealSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(
{
'error': serializer.errors,
'message': 'Invalid data'
},
status=status.HTTP_400_BAD_REQUEST
)
class AppealList(APIView):
@swagger_auto_schema(
operation_description="Get Appeal list",
responses={200: AppealSerializer(many=True)}
)
def get(self, request:Request):
appeals = Appeal.objects.all()
serializer = AppealSerializer(appeals, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class AppealDetail(APIView):
@swagger_auto_schema(
operation_description="Get Appeal detail",
responses={200: AppealSerializer()}
)
def get(self, request:Request, id):
appeal = Appeal.objects.get(id=id)
serializer = AppealSerializer(appeal)
return Response(serializer.data, status=status.HTTP_200_OK)
class AppealUpdate(APIView):
@swagger_auto_schema(operation_description="Update Appeal", request_body=AppealSerializer)
def post(self, request:Request, id):
data = request.data
appeal = Appeal.objects.get(id=id)
appeal.name = data.get('name', appeal.name)
appeal.phone_number = data.get('phone_number', appeal.phone_number)
appeal.emile = data.get('emile', appeal.emile)
appeal.message = data.get('message', appeal.message)
appeal.title = data.get('title', appeal.title)
appeal.save()
serializer = AppealSerializer(appeal)
return Response(serializer.data, status=status.HTTP_200_OK)
class AppealDelete(APIView):
@swagger_auto_schema(operation_description="Delete Appeal", request_body=AppealSerializer)
def post(self, request:Request, id):
appeal = Appeal.objects.get(id=id)
appeal.delete()
return Response({'message': 'Deleted'}, status=status.HTTP_200_OK)
|
quvvatullayev/tour
|
tour/views/appeal.py
|
appeal.py
|
py
| 2,625 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.views.APIView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "models.Appeal.objects.all",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.Appeal.objects",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "models.Appeal",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.Appeal.objects.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.Appeal.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "models.Appeal",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "models.Appeal.objects.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models.Appeal.objects",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "models.Appeal",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "models.Appeal.objects.get",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "models.Appeal.objects",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "models.Appeal",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "serializers.AppealSerializer",
"line_number": 64,
"usage_type": "name"
}
] |
43865643259
|
import os
import re
import ssl
from datetime import datetime, timedelta
from typing import Any, Dict, Optional, TypeVar, Union
import ciso8601
T = TypeVar("T", str, None)
# From https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string
# Answer: https://stackoverflow.com/a/51916936
# datetimeParseRegex = re.compile(r'^((?P<days>[\.\d]+?)d)?((?P<hours>[\.\d]+?)h)?((?P<minutes>[\.\d]+?)m)?((?P<seconds>[\.\d]+?)s)?$')
datetime_regex = re.compile(
r"^((?P<weeks>[\.\d]+?)w)? *"
r"^((?P<days>[\.\d]+?)d)? *"
r"((?P<hours>[\.\d]+?)h)? *"
r"((?P<minutes>[\.\d]+?)m)? *"
r"((?P<seconds>[\.\d]+?)s?)?$"
)
def parse_datetime(datetime: Union[datetime, str]) -> datetime:
"""Parses a datetime object or a string into a datetime object
Args:
datetime (Union[datetime.datetime, str]): Datetime object or string to parse
Returns:
datetime.datetime: Parsed datetime object
"""
if isinstance(datetime, str):
return ciso8601.parse_datetime(datetime)
return datetime
def encode_datetime(dict: Dict[str, Any]) -> Dict[str, Any]:
"""Takes a dictionary and encodes all datetime objects into ISO 8601 strings
Args:
dict (Dict[str, Any]): Dictionary to encode
Returns:
Dict[str, Any]: The dictionary with all datetime objects encoded as ISO 8601 strings
"""
for k, v in dict.items():
if isinstance(v, datetime):
dict[k] = v.isoformat()
return dict
def parse_subreddit(subreddit: Union[str, None]) -> str:
"""Parses a subreddit name to be used in a reddit url
Args:
subreddit (Union[str, None]): Subreddit name to parse
Returns:
str: Parsed subreddit name
"""
if subreddit is None:
return "all"
    return re.sub(r"^[r/]{2}", "", subreddit, flags=re.IGNORECASE)
def parse_time_str(time_str: str) -> Union[timedelta, None]:
"""Parse a time string e.g. (2h13m) into a timedelta object.
Taken straight from https://stackoverflow.com/a/4628148
Args:
time_str (str): A string identifying a duration. (eg. 2h13m)
Returns:
datetime.timedelta: A datetime.timedelta object
"""
parts = datetime_regex.match(time_str)
if not parts:
return
parts = parts.groupdict()
time_params = {}
for name, param in parts.items():
if param:
time_params[name] = int(param)
return timedelta(**time_params)
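# For example (illustrative): parse_time_str("2h13m") == timedelta(hours=2, minutes=13),
# while a string the regex cannot parse (e.g. "later") returns None.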
def setup_ssl(
ca_path: Union[str, None],
cert_path: str,
key_path: Union[str, None],
key_password: Union[str, None],
) -> ssl.SSLContext:
sslctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca_path)
sslctx.check_hostname = True
sslctx.load_cert_chain(cert_path, key_path, key_password)
return sslctx
def is_docker() -> bool:
path = "/proc/self/cgroup"
return os.path.exists("/.dockerenv") or (
os.path.isfile(path) and any("docker" in line for line in open(path))
)
def tick(opt: Optional[bool], label: Optional[str] = None) -> str:
lookup = {
True: "<:greenTick:330090705336664065>",
False: "<:redTick:330090723011592193>",
None: "<:greyTick:563231201280917524>",
}
emoji = lookup.get(opt, "<:redTick:330090723011592193>")
if label is not None:
return f"{emoji}: {label}"
return emoji
|
No767/Kumiko
|
Bot/Libs/utils/utils.py
|
utils.py
|
py
| 3,388 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "ciso8601.parse_datetime",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "typing.Union",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "ssl.create_default_context",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "ssl.Purpose",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "ssl.SSLContext",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 107,
"usage_type": "name"
}
] |
25208776927
|
from flask import Flask, request, jsonify
import os
import jwt
from flask_cors import CORS, cross_origin
from dynamodb import DB
application = Flask(__name__)
db = DB()
CORS(application, headers=['Content-Type', 'Authorization'], supports_credentials=True,
expose_headers='Authorization', origins='*')
JWT_SECRET = "datajbsnmd5h84rbewvzx6*cax^jgmqw@m3$ds_%z-4*qy0n44fjr5shark"
JWT_ALGO = "HS256"
@application.route('/')
def landing():
return "This is the homepage of the Explora server!!!!"
@application.route('/get_username/<repo_id>', methods=['POST', 'GET'])
def get_username(repo_id):
'''
Authorize request, then retrieve username for given repo_id
'''
claims = authorize_user(request)
if claims is None: return jsonify(make_unauthorized_error()), 400
user_id = claims["pk"]
try:
username = db.get_username(user_id, repo_id)
except Exception as e:
return jsonify(make_error(str(e)))
return jsonify(make_success(username))
def authorize_user(request):
"""
Helper function that authorizes a request/user based on the JWT Token
provided. Return the claims if successful, `None` otherwise.
"""
try:
jwt_string = request.get_json().get("token")
claims = jwt.decode(jwt_string, JWT_SECRET, algorithms=[JWT_ALGO])
except Exception as e:
print(str(e))
return None
return claims
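# A minimal sketch of the expected request body (shape inferred from the code above):
# POST /get_username/<repo_id> with JSON {"token": "<JWT signed with JWT_SECRET>"},
# where the decoded claims must carry the user's primary key under "pk".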
def make_unauthorized_error():
"""
Helper function that returns an unauthorization error.
"""
return make_error('Authorization failed.')
def make_error(msg):
"""
Helper function to create an error message to return on failed requests.
"""
return {'success': False, 'message': msg}
def make_success(msg):
"""
Helper function to create a success message to return on successful requests.
"""
return {'success': True, 'message': msg}
if __name__ == '__main__':
    import sys
    from twisted.python import log
    log.startLogging(sys.stdout)
application.run(host="0.0.0.0")
|
DiscreetAI/explora-server
|
server/main.py
|
main.py
|
py
| 2,020 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dynamodb.DB",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "jwt.decode",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "twisted.python.log.startLogging",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "twisted.python.log",
"line_number": 69,
"usage_type": "name"
}
] |
13583035400
|
#!/usr/bin/env python3
import random
import base64
from argparse import ArgumentParser
from os import urandom
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from flask import Flask, jsonify, request, send_from_directory
app = Flask(__name__)
flag = ""
key = b""
nonce = b""
leaks = []
def main():
global flag, leaks, key, nonce
key = urandom(32)
nonce = urandom(16)
flag = gen_flag()
leaks.append(base64.b64encode(encrypt(flag.encode('utf-8'))).decode('utf-8'))
for _ in range(0, 64):
leaks.append(base64.b64encode(encrypt(gen_flag().encode('utf-8'))).decode('utf-8'))
def encrypt(data):
cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=default_backend()).encryptor()
return cipher.update(data) + cipher.finalize()
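# Note on this construction: key and nonce are generated once and reused for every
# call, so AES-CTR produces the same keystream each time; for any two leaked
# ciphertexts c1 ^ c2 == p1 ^ p2, which is the weakness this challenge is built around.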
def gen_flag():
a = "0123456789abcdef"
b = "FLAG-{"
for _ in range(0, 32):
b = b + random.choice(a)
b = b + "}"
return b
@app.route('/')
def get_index():
return send_from_directory('website', 'index.html')
@app.route('/api/verify', methods=["POST"])
def verify_secret():
if request.get_json().get('data') == flag:
return "You won!"
else:
return "Invalid!"
@app.route('/api/leaks')
def api_get_leak():
return jsonify(leaks)
@app.route('/<path:path>')
def get_website(path):
return send_from_directory('website', path)
main()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-H',
'--host',
action='store',
dest='host',
default='127.0.0.1',
help='Host address')
parser.add_argument('-p',
'--port',
action='store',
dest='port',
default=5000,
help='Host port')
args = parser.parse_args()
app.run(host=args.host, port=args.port)
|
zer0x64/breaking-aes-101
|
challenges/ctr/ctr2/ctr2.py
|
ctr2.py
|
py
| 2,060 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.Cipher",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.algorithms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.modes.CTR",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.ciphers.modes",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.default_backend",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 75,
"usage_type": "call"
}
] |
72179452347
|
import json
with open("test.txt","r",encoding="utf-8") as f:
text = f.read()
"hallo".replace()
# removing unwanted characters from text
words = text.replace('\n',' ').replace('.',' ').replace(',',' ').replace(';',' ').replace('!',' ').replace('?',' ').replace(':',' ')
# split the text into list of words, drop empty words
words = [word.lower() for word in words.split(" ") if word]
# print(words)
wordCount = {}
for word in words:
if word in wordCount:
wordCount[word] = wordCount[word] + 1
else:
wordCount[word] = 1
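# max() with key=wordCount.get returns the word with the largest count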
maxcount = max(wordCount,key=wordCount.get)
print(maxcount,wordCount[maxcount])
# open file in write mode
with open('save.json','w',encoding="utf-8") as f:
# dump data as str to filestream
json.dump(wordCount,f,indent=4)
with open('save.json','r',encoding="utf-8") as f:
newWordCount = json.load(f)
print(newWordCount)
|
Zadest/python-5
|
word_count_dict.py
|
word_count_dict.py
|
py
| 889 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dump",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 33,
"usage_type": "call"
}
] |
70911318267
|
# -*- coding: utf-8 -*-
import scrapy
from time import sleep
from random import randint
class ImdbSpiderSpider(scrapy.Spider):
name = 'imdb_spider'
allowed_domains = ['www.imdb.com']
start_urls = ['https://www.imdb.com/search/title/?release_date=2019-01-01,&sort=num_votes,desc']
page_count = 0
def parse(self, response):
all_movies = response.xpath('//div[@class="lister-item mode-advanced"]')
for movie in all_movies:
title = movie.xpath('normalize-space(.//h3/a/text())').extract_first()
duration = movie.xpath('.//p[@class="text-muted "]/span[@class="runtime"]/text()').extract_first()
genre = movie.xpath('normalize-space(.//p[@class="text-muted "]/span[@class="genre"]/text())').extract_first()
imdb_rating = movie.xpath('.//div[@class="inline-block ratings-imdb-rating"]/strong/text()').extract_first()
metascore_rating = movie.xpath('normalize-space(.//div[@class="inline-block ratings-metascore"]/span/text())').extract_first()
votes = movie.xpath('.//span[@name="nv"]/text()').extract_first()
yield {
'title': title,
'duration': duration,
'genre': genre,
'imdb_rating': imdb_rating,
'metascore_rating': metascore_rating,
'votes': votes
}
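        # note: time.sleep() here blocks Scrapy's Twisted reactor; the DOWNLOAD_DELAY setting is the usual way to throttle requests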
sleep(randint(2, 5))
next_page = response.xpath('//div[@class="desc"]/a[@class="lister-page-next next-page"]/@href').extract_first()
self.page_count += 1
if next_page and self.page_count < 40:
yield scrapy.Request(response.urljoin(next_page))
|
ArRosid/Scrapy-Project
|
scrapy_project/spiders/imdb_spider.py
|
imdb_spider.py
|
py
| 1,669 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 37,
"usage_type": "call"
}
] |
44476674074
|
from abc import ABCMeta, abstractmethod
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from app.config.settings import FONT_LABEL_TO_META, NUM_TOP_K
from app.domain.entity import BoundingBox, PredictFont
from app.domain.preprocess import Preprocessor
from PIL.Image import Image
from torchvision import models
def fetch_vgg16() -> nn.Module:
net = models.vgg16_bn(pretrained=False)
net.features[0] = nn.Conv2d(1, 64, 3, stride=1, padding=1)
net.classifier[6] = nn.Linear(4096, 365)
return net
class Predictor(metaclass=ABCMeta):
@abstractmethod
def predict(
self, image: Image, bounding_boxes: List[BoundingBox]
) -> List[PredictFont]:
raise NotImplementedError("Method not implemented")
class MockPredictor(Predictor):
def predict(
self, image: Image, bounding_boxes: List[BoundingBox]
) -> List[PredictFont]:
return [
PredictFont(
fontName="a",
fontNameJa="a",
fontNameEn="a",
fontWeight=100,
type="adobe",
adobeId="asssa",
score=0.1,
)
]
class FontPredictor(Predictor):
def __init__(self, preprocessor: Preprocessor, model: nn.Module) -> None:
self.preprocessor = preprocessor
self.model = model
def predict(
self, image: Image, bounding_boxes: List[BoundingBox]
) -> List[PredictFont]:
patches = self.preprocessor(image, bounding_boxes)
outputs = self.model(patches)
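        # average the per-patch logits, take the top-K font indices, and turn them into probabilities with softmax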
agg_outputs = torch.mean(outputs, dim=0)
top_fonts = torch.argsort(agg_outputs, descending=True)[:NUM_TOP_K].numpy()
scores = F.softmax(agg_outputs, dim=0)[top_fonts].detach().numpy()
return [
PredictFont(
fontName=FONT_LABEL_TO_META[f]["fontName"],
fontNameJa=FONT_LABEL_TO_META[f]["fontNameJa"],
fontNameEn=FONT_LABEL_TO_META[f]["fontNameEn"],
fontWeight=FONT_LABEL_TO_META[f]["fontWeight"],
type=FONT_LABEL_TO_META[f]["type"],
adobeId=FONT_LABEL_TO_META[f]["adobeId"],
score=round(s, 3),
)
for f, s in zip(top_fonts, scores)
]
|
kishimoto-banana/font-search-api
|
app/domain/predictor.py
|
predictor.py
|
py
| 2,308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.models.vgg16_bn",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "abc.ABCMeta",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.BoundingBox",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.PredictFont",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.BoundingBox",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.PredictFont",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.PredictFont",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "app.domain.preprocess.Preprocessor",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.BoundingBox",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.mean",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.argsort",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "app.config.settings.NUM_TOP_K",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.PredictFont",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "app.config.settings.FONT_LABEL_TO_META",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "app.config.settings.FONT_LABEL_TO_META",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "app.config.settings.FONT_LABEL_TO_META",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "app.config.settings.FONT_LABEL_TO_META",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "app.config.settings.FONT_LABEL_TO_META",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "app.config.settings.FONT_LABEL_TO_META",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "app.domain.entity.PredictFont",
"line_number": 54,
"usage_type": "name"
}
] |
39373093044
|
import requests
__author__ = "Griffith Asare Awuah (@gwuah)"
class ogma():
"""Language Detection Library For Pythonistas"""
def __init__(self, accessKey):
self.payload = {'access_key': str(accessKey)}
def detect(self, phrase) :
self.payload['query'] = str(phrase)
try :
r = requests.get('http://apilayer.net/api/detect', self.payload)
self.response = r.json()
if (r.status_code == requests.codes.ok) and (self.response['success'] != False) :
# connection successful! You were able to get meaningful data from the endpoint
return "{}".format(self.response['results'][0]['language_name'])
else :
                if r.status_code // 100 == 4 :
                    # a 4xx response: the request itself was rejected (bad request or endpoint)
                    print("Detection wasn't successful. \nThere was an error from your side. \nCheck your internet connection.")
                elif r.status_code // 100 == 5 :
                    # a 5xx response: the API server had an error; try again later
                    print("Detection wasn't successful. \nThere was an error from the server. \nTry again later.")
                elif (self.response['success'] == False) and (self.response['error']['code'] == 101) :
                    # API error 101: the access key was probably missing or invalid
                    return self.response['error']['info'][:-41]
                elif (self.response['success'] == False) and (self.response['error']['code'] == 210) :
                    # API error 210: the query was probably missing or malformed
                    return self.response['error']['info'][:-43]
except requests.exceptions.ConnectionError :
print("Detection wasn't sucessful. \nYou are not connected to the internet Connection.")
|
gwuah/ogma
|
api.py
|
api.py
|
py
| 1,561 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 32,
"usage_type": "attribute"
}
] |
71573663549
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import regex as re
from sqlalchemy import create_engine, String, Float, DATE
import pymssql
from datetime import date, datetime
import matplotlib.pyplot as plt
import os
from dotenv import load_dotenv
from empiricaldist import Cdf
import seaborn as sns
from glassdoor.scraper import *
import streamlit as st
import time
def salary_convert(salary):
if salary == 0:
return np.nan
if salary < 1000:
return salary * 1788
else:
return salary
env_path = os.path.join(r'/home/emad/code/emadam/glassdoor/glassdoor/',
'postgres_login.env')
if os.path.exists(env_path):
load_dotenv(env_path)
DATABASE = os.getenv('database')
USERNAME = os.getenv('username')
PASSWORD = os.getenv('password')
HOST = os.getenv('host')
engine = create_engine(
f"postgresql://{USERNAME}:{PASSWORD}@{HOST}:5432/{DATABASE}")
headers = {
"User-Agent":
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/92.0.4515.159 Safari/537.36"
}
URL = f'https://www.glassdoor.com.au/Job/melbourne-junior-data-analyst-jobs-SRCH_IL.0,9_IC2264754_KO10,29.htm'
resp = requests.get(URL, headers=headers)
# specifying a desired format of page using the html parser
soup = BeautifulSoup(resp.text, "html.parser")
job_title = scraper.extract_job_title_from_result(soup)
co_name = scraper.extract_company_name_from_result(soup)
co_rate = scraper.extract_company_rate_from_result(soup)
co_loc = scraper.extract_company_location_from_result(soup)
co_sal = scraper.extract_company_salary_from_result(soup)
job_age = scraper.extract_job_age_from_result(soup)
data = list(zip(job_title, co_name, co_rate, co_loc, co_sal,
job_age))
job_data = pd.DataFrame(data)
job_data = job_data.rename(
columns={
0: 'Job Title',
1: 'Company',
2: 'Rank',
3: 'Location',
4: 'Salary',
5: 'Ad Date'
})
job_data['Ad Date'] = pd.to_datetime(job_data['Ad Date'])
job_data.to_sql("job_data", engine, if_exists='append', index=False)
jobs_stored = pd.read_sql("job_data", engine)
jobs_stored['Ad Date'] = pd.to_datetime(jobs_stored['Ad Date'])
jobs_stored.sort_values(by=['Ad Date'], inplace=True)
jobs_stored.drop_duplicates(subset=['Job Title', 'Company', 'Location'],
keep='first',
inplace=True)
ad_count = jobs_stored.groupby('Ad Date').size()
jobs_stored = jobs_stored.set_index(pd.DatetimeIndex(
jobs_stored['Ad Date'])).sort_index()
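# pull the lower and (below) upper comma-grouped figures out of the salary text, e.g. strings like '$60,000 - $80,000' (format assumed)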
jobs_stored['Min_Salary'] = jobs_stored['Salary'].str.extract(
r'([0-9]+,*[0-9]+).*')
jobs_stored['Min_Salary'] = jobs_stored['Min_Salary'].str.replace(
r'\,', '', regex=True).astype(float).astype(pd.Int64Dtype())
jobs_stored['Max_Salary'] = jobs_stored['Salary'].str.extract(
r'[0-9]+,*[0-9]+.*?([0-9]+,*[0-9]+)')
jobs_stored['Max_Salary'] = jobs_stored['Max_Salary'].str.replace(
r'\,', '', regex=True).astype(float).astype(pd.Int64Dtype())
jobs_stored['Min_Salary'] = jobs_stored['Min_Salary'].fillna(value=0)
jobs_stored_min = jobs_stored.apply(lambda x: salary_convert(x['Min_Salary']),
axis=1)
jobs_stored['Min_Salary'] = pd.DataFrame(jobs_stored_min)
jobs_stored['Max_Salary'] = jobs_stored['Max_Salary'].fillna(value=0)
jobs_stored_max = jobs_stored.apply(lambda x: salary_convert(x['Max_Salary']),
axis=1)
jobs_stored['Max_Salary'] = pd.DataFrame(jobs_stored_max)
jobs_stored['Seniority'] = jobs_stored['Job Title'].apply(
lambda x: 'Senior' if x.find('Senior') != -1 else
('Junior' if x.find('Junior') != -1 else
('Entry Level' if x.find('Entry level') != -1 else ('Graduate' if x.find(
'Graduate') != -1 else ('Manager' if x.find('Manager') != -1 else (
'Internship' if x.find('Internship') != -1 else np.nan))))))
jobs_stored.dropna(subset=['Ad Date'], how='all', inplace=True)
plt.style.use('seaborn-whitegrid')
sns.set()
fig, ax = plt.subplots(2, 2)
fig.set_size_inches(16, 11)
# set the spacing between subplots
plt.subplots_adjust(left=0.1,
bottom=0.1,
right=0.9,
top=0.9,
wspace=0.4,
hspace=0.4)
min_salary = jobs_stored['Min_Salary']
before_Date = jobs_stored['Ad Date'] < pd.to_datetime('2022-01-01')
ax[0, 0].plot(Cdf.from_seq(min_salary[before_Date].dropna()),
label='Before 2022')
ax[0, 0].plot(Cdf.from_seq(min_salary[~before_Date].dropna()),
label='After 2022')
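# empirical CDF by hand: sort the salaries and pair each value with its cumulative rank / n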
x_min = np.sort(jobs_stored['Min_Salary'].dropna())
y_min = np.arange(1, len(x_min) + 1) / len(x_min)
x_max = np.sort(jobs_stored['Max_Salary'].dropna())
y_max = np.arange(1, len(x_max) + 1) / len(x_max)
pct_list = np.array([25, 50, 75])
maxpct_val = np.percentile(jobs_stored['Max_Salary'].dropna(), pct_list)
minpct_val = np.percentile(jobs_stored['Min_Salary'].dropna(), pct_list)
ax[0, 0].set_ylabel('CDF')
ax[0, 0].set_title(
'Distribution of minimum salary of "Data Analyst" jobs on Glassdoor',
fontweight="bold",
pad=20)
ax[0, 0].legend()
ax[0, 0].set_xlabel('Estimated salary')
ax[0, 1].plot(x_min,
y_min,
marker='.',
linestyle='none',
color='r',
label='Minimum salary')
ax[0, 1].plot(x_max,
y_max,
marker='.',
linestyle='none',
color='b',
label='Maximum salary')
ax[0, 1].plot(maxpct_val,
pct_list / 100,
marker='^',
linestyle='none',
color='c',
label='25th, 50th and 75th Percentile')
ax[0, 1].plot(minpct_val,
pct_list / 100,
marker='^',
linestyle='none',
color='k',
label='25th, 50th and 75th Percentile')
ax[0, 1].annotate(
'Mean:',
xy=(jobs_stored['Min_Salary'].mean().astype('int64'), 0.5),
xytext=(40000, 0.9),
arrowprops=dict(arrowstyle="fancy",
facecolor='green',
connectionstyle="angle3,angleA=0,angleB=-90"),
)
ax[0, 1].set_ylabel('ECDF')
ax[0, 1].set_title(
'Distribution of min and max salary of "Data Analyst" on Glassdoor',
fontweight="bold",
pad=20)
ax[0, 1].legend()
ax[0, 1].set_xlabel('Estimated salary')
ax[1, 0].bar(jobs_stored.index.unique(), ad_count, linestyle='None', color='r')
ax[1, 0].figure.canvas.draw()
ax[1, 0].tick_params(axis='x',
which='major',
rotation=20,
direction='inout',
length=6,
width=2,
color='k',
labelcolor='royalblue')
ax[1, 0].set_xlabel('Date of Advertisement', labelpad=0.0, color='magenta')
ax[1, 0].set_ylabel('Number of Ads', color='purple')
ax[1, 0].set_title('\'Data Analyst Job\' Advertisements in Glassdoor website',
fontweight="bold",
pad=20)
ax[1, 1].pie(jobs_stored['Seniority'].value_counts(),
             labels=jobs_stored['Seniority'].value_counts().index,  # keep labels aligned with the slice counts
normalize=True,
autopct='%1.1f%%',
shadow=True,
startangle=0)
ax[1, 1].set_title('Seniority of job ads(percent)', fontweight="bold", pad=20)
# fig.savefig("glassdoor" + np.datetime64(date.today()).astype('str') + ".png")
st.set_page_config(page_title='Data Analyst Job: Market Analysis',
page_icon='favicon.png',
layout="wide")
message = st.info("Fetching data from Database...")
with st.spinner('Please Wait...'):
my_bar = st.progress(0)
# Remove the menu button from Streamlit
st.markdown(""" <style>
    #MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """,
unsafe_allow_html=True)
my_bar.progress(25)
st.title('Data Analyst Job: Market Analysis')
my_bar.progress(50)
st.markdown("""
## Project Description 👇
This is a personal project in which I researched *"Data Analyst"* job openings in
Melbourne. It shows the minimum and maximum salary of a
**Data Analyst in Melbourne**, Australia, according to job advertisements gathered
from [https://www.glassdoor.com.au/](https://www.glassdoor.com.au/), and it
saves the results in a *PostgreSQL* database so that historical data is available
for further analysis.
""")
st.info(
'💡 The cumulative distribution function (CDF) of random variable X is defined as FX(x)=P(X≤x), for all x∈R.'
)
st.pyplot(fig)
my_bar.progress(100)
my_bar.empty()
message.info('Done!')
time.sleep(3)
message.empty()
agree = st.checkbox('Show DataFrame recent records')
if agree:
with st.spinner('Please Wait...'):
cm = sns.color_palette("coolwarm_r", as_cmap=True)
df = jobs_stored.reset_index(
drop=True).tail(10).sort_values(by='Ad Date', ascending=False).style.background_gradient(cmap=cm)
st.write(df)
|
emadam/glassdoor
|
app.py
|
app.py
|
py
| 9,171 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.nan",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.Int64Dtype",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.Int64Dtype",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "seaborn.set",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "empiricaldist.Cdf.from_seq",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "empiricaldist.Cdf",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "empiricaldist.Cdf.from_seq",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "empiricaldist.Cdf",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "numpy.sort",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "streamlit.set_page_config",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "streamlit.info",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "streamlit.progress",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "streamlit.info",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "streamlit.pyplot",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "streamlit.checkbox",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "seaborn.color_palette",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 252,
"usage_type": "call"
}
] |
70945121788
|
# Read search results from the web, then render them as a word cloud
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import quote
from boto.dynamodb import item
#keyword = input('Search term: ')
keyword = '장마'
print(keyword)
print(quote(keyword))
# Use the Dong-A Ilbo article search
target_url = "http://www.donga.com/news/search?query=" + quote(keyword)
sou_code = urllib.request.urlopen(target_url)
soup = BeautifulSoup(sou_code, 'lxml', from_encoding='utf-8')
#print(soup)
########################
msg = ""
for title in soup.find_all('p', 'tit'):
title_link = title.select('a')
#print(title_link)
article_url = title_link[0]['href']
#print(article_url)
sou_article = urllib.request.urlopen(article_url)
soup = BeautifulSoup(sou_article,'lxml', from_encoding='utf-8')
contents = soup.select('div.article_txt')
for imsi in contents:
item = str(imsi.find_all(text=True))
#print(item)
msg = msg + item
print(msg)
from konlpy.tag import Okt
from collections import Counter
okt = Okt()
nouns = okt.nouns(msg)
result = []
for imsi in nouns:
    if len(imsi) > 1: # keep only words with two or more characters
result.append(imsi)
print(result)
count = Counter(result)
tag = count.most_common(50) # keep only the top 50 words
print(tag)
##########################################
import pytagcloud
# (min)maxsize : font size
taglist = pytagcloud.make_tags(tag, maxsize=100)
print(taglist)
pytagcloud.create_tag_image(taglist, "word.png", size=(1000,600),
fontname="Korean", rectangular=False)
# Read the image
# import matplotlib.pylab as plt
# import matplotlib.image as mpimg
# #%matplotlib inline
# img = mpimg.imread("word.png")
# plt.imshow(img)
# plt.show()
# Open the image in a web browser
import webbrowser
webbrowser.open("word.png")
|
kangmihee/EX_python
|
py_morpheme/pack/morp3wordcloud.py
|
morp3wordcloud.py
|
py
| 1,969 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.parse.quote",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.parse.quote",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "boto.dynamodb.item",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "boto.dynamodb.item",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "konlpy.tag.Okt",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pytagcloud.make_tags",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pytagcloud.create_tag_image",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 78,
"usage_type": "call"
}
] |
20156935479
|
from flask import request
def validate_id(id):
# if not found in params
if (id is None):
raise TypeError("Request params (id) not found")
# if description params is empty
if not id:
raise ValueError("id is empty")
# if not integer
if not isinstance(id, int):
raise TypeError("id is not integer")
def validate_latitude(latitude):
# if not found in params
if (latitude is None):
raise TypeError("Request params (latitude) not found")
# if not float
if not isinstance(latitude, float):
raise TypeError("latitude is not float")
def validate_longtitude(longtitude):
# if not found in params
if (longtitude is None):
raise TypeError("Request params (longtitude) not found")
# if not float
if not isinstance(longtitude, float):
raise TypeError("longtitude is not float")
def point_read_contract(request):
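    # request.args.get(..., type=int) returns None when the parameter is missing or cannot be converted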
id = request.args.get('id', type=int)
validate_id(id)
return {
'id': int(id)
}
def point_create_contract(request):
latitude = request.args.get('latitude', type=float)
longtitude = request.args.get('longtitude', type=float)
validate_latitude(latitude)
validate_longtitude(longtitude)
return {
'latitude': float(latitude),
'longtitude': float(longtitude)
}
|
adriangohjw/cz2006-software-engineering
|
contracts/point_contracts.py
|
point_contracts.py
|
py
| 1,360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.request.args.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "name"
}
] |
19214467121
|
from replit import clear
print("Welcome to the secret auction program!")
def highest_bidder(bid_record):
highest = 0
winner = ""
for bidder in bid_record:
bid_amount = bid_record[bidder]
if bid_amount > highest:
highest = bid_amount
winner = bidder
print(f"The winner is {winner} with a bid of ${highest}")
mapp = {}
restart = True
while restart:
name = input("What is your name?: ")
bid = int(input("What is your bid?: $"))
mapp[name] = bid
    other_bidders = input("Are there any other bidders? Type 'Yes' or 'No': ").lower()
if other_bidders == "yes":
restart = True
clear()
else:
restart = False
highest_bidder(mapp)
|
Iyemizee/Secret_Auction_Project
|
main.py
|
main.py
|
py
| 678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "replit.clear",
"line_number": 24,
"usage_type": "call"
}
] |