seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
290352937
|
"""Module for visualizing predictions as heat maps.
This module provides tools for creating and visualizing heat maps. The heat map of a given image is created by taking
samples from the image, predicting the labels of the samples and assembling the predictions in the heat map.
Example
-------
Train a neural network to identify the bright areas of a chessboard and visualize its predictions in a heat map.
>>> import numpy as np
>>> import os
>>> from data.training_data import TrainingData
>>> from neural_network import model
>>> from neural_network.predictor import Predictor
>>> from visualization import heat_map
>>> from visualization.pixel_interpolation import Interpolator
>>>
>>> # create three 10 by 10 pixel images with 3 color channels and store them in a data set for training
>>> black_image = np.full((10, 10, 3), 0)
>>> white_image = np.full((10, 10, 3), 255)
>>> grey_image = np.full((10, 10, 3), 127)
>>> training_data = TrainingData([black_image, white_image], [[0], [1]], [grey_image], [[1]])
>>>
>>> # setup the path to the directory where the model of the neural network is saved
>>> cwd = os.getcwd()
>>> model_directory = os.path.join(cwd, "brightness_detector")
>>>
>>> # create and train the model
>>> model.create(model_directory, 10, 3, 1)
>>> model.train(model_directory, training_data, max_iterations=15, save_frequency=2, max_save=5)
>>>
>>> # create an image of a chess board
>>> black_square = np.full((100, 100, 3), 0)
>>> white_square = np.full((100, 100, 3), 255)
>>> column_1 = np.concatenate((white_square, black_square, white_square, black_square,
...                            white_square, black_square, white_square, black_square), axis=0)
>>> column_2 = np.concatenate((black_square, white_square, black_square, white_square,
...                            black_square, white_square, black_square, white_square), axis=0)
>>> chess_board = np.concatenate((column_1, column_2, column_1, column_2,
...                               column_1, column_2, column_1, column_2), axis=1)
>>>
>>> # create a heat map of the bright areas of the chessboard image using the most recent training iteration of the
>>> # brightness detector model
>>> chess_board_hm = heat_map.create_heat_map(chess_board, Predictor(model_directory), heat_map_scale=10)
>>>
>>> # plot the heat map
>>> heat_map.plot_heat_map(chess_board, chess_board_hm)
"""
__author__ = "Dennis Kraft, Ariel Bridgeman, Tobias B <github.com/sezanzeb>"
__version__ = "1.0"
import numpy as np
import cv2
from matplotlib import pyplot as plt
from .interpolation import NearestNeighbour
from .sampler import RandomGrid
from scipy.spatial.distance import cdist
from hwdetect.utils import show
from sklearn.neighbors import KNeighborsRegressor
import time
import logging
# __name__ is hwdetect.visualization.heat_map
logger = logging.getLogger(__name__)
def create_heat_map(image, predictor,
label_aggregator=lambda labels: labels[0],
sampler=RandomGrid(),
preprocessors=[],
interpolator=NearestNeighbour(),
postprocessors=[],
heat_map_scale=10,
return_preprocessed=False):
"""Create a heat map of an image based on the predictions made by the specified neural network model.
Parameters
----------
image : np.array
The image for which the heat map is created
predictor : neural_network.predictor.Predictor
The predictor object that is used for predicting the heat map.
label_aggregator : function from list of floats to float, optional
The function that is used for combining the labels predicted by the predictor into a single label. By default,
only the first label is used.
sampler : class implementing visualization.pixel_interpolation.RandomSampler
The sampler that is used for drawing samples from the image and predicting their labels.
preprocessors : list of functions from np.array to np.array
A list of the image processing functions that are applied to the original image before starting the sampling and
prediction phase. The preprocessors are applied in the order of the list. The goal of the preprocessors is to
remove machine writing, so that the predictor can jump over those chunks that are white after preprocessing.
The original image is used for prediction though.
interpolator : class implementing visualization.pixel_interpolation.Interpolator
The interpolator that is used to infer the pixels of the heat map based on the predictions made in the sampling
and prediction phase.
    postprocessors : list of functions from np.array to np.array
        A list of processing functions that are applied to the finished heat map after interpolation, in the order
        of the list.
    heat_map_scale : int
        The resolution of the heat map. For instance, a resolution of 5 implies that squares of 5 by 5 pixels in the
        original image are condensed into 1 pixel in the heat map.
    return_preprocessed : bool
        If True, return a 2-tuple (heat_map, preprocessed_image) instead of only the heat map.
        Default: False
Returns
-------
np.array
A two dimensional array representing the heat map. Note that the height and width of the array matches the
height and width of the original image scaled down by the heat map resolution parameter.
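
    Example
    -------
    A minimal sketch of a call (illustrative only; assumes the setup from the module-level example above, i.e.
    ``chess_board``, ``model_directory`` and ``Predictor``):

    >>> hm = create_heat_map(chess_board, Predictor(model_directory), heat_map_scale=10)
    >>> hm.shape  # height and width of chess_board divided by heat_map_scale
    (80, 80)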
"""
original = image.copy()
# set up the heat map
height = image.shape[0]
width = image.shape[1]
heat_map_scale = min(heat_map_scale, min(width, height))
heat_map = np.zeros((height // heat_map_scale, width // heat_map_scale))
# preprocess image
if len(preprocessors) > 0:
logger.info('preprocessing...')
for preprocessor in preprocessors:
image = preprocessor.filter(image)
# make predictions
logger.info('predicting...')
predictions = sampler.sample(image, predictor, label_aggregator, original)
X_pred = [k for k in predictions]
Y_pred = [predictions[k] for k in predictions]
interpolator.fit(X_pred, Y_pred)
# create list of tuples (y, x) for all the coordinates in the heatmap
# [0] is height, [1] is width
coords = np.concatenate(
np.dstack(np.mgrid[:heat_map.shape[0], :heat_map.shape[1]]))
coords_scaled = coords * heat_map_scale
logger.info('interpolating...')
values = interpolator.predict(coords_scaled)
heat_map = values.reshape(heat_map.shape)
if len(postprocessors) > 0:
# postprocess heat_map
logger.info('postprocessing...')
for postprocessor in postprocessors:
heat_map = postprocessor.filter(heat_map)
logger.info('done')
if return_preprocessed:
return heat_map, image
# default behaviour:
return heat_map
def heat_map_to_img(heat_map):
"""Convert percentages from heat_map to a grayscale image
Parameters
----------
heat_map : np.array
The heat map of an image.
Returns
    -------
    np.array
        The grayscale image of the heat map.
"""
if heat_map.max() <= 1:
        heat_map = heat_map * 255  # avoid modifying the caller's array in place
heat_map = heat_map.astype(np.uint8)
return heat_map
def bounded_image(image, heat_map, bound_type="box", perc_thresh=0.90):
"""Create image with bounding boxes or contours using the heat map
Parameters
----------
image : np.array
The image that is plotted.
heat_map : np.array
The heat map put on top of the image
bound_type : str
The string used to specify whether to use a "box" or "contour" for bounding.
    perc_thresh : float between 0 and 1
        The fraction of 255 used as the binary threshold applied to the grayscale heat map before finding contours.
    Returns
    -------
np.array
image with bounding objects
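
    Example
    -------
    A minimal sketch (illustrative only; assumes ``chess_board`` and ``chess_board_hm`` from the module-level
    example):

    >>> boxed = bounded_image(chess_board, chess_board_hm, bound_type="box", perc_thresh=0.9)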
"""
image = image.copy()
# make sure they are of the same height and width
if image.shape[:2] != heat_map.shape[:2]:
h, w = image.shape[:2]
heat_map = cv2.resize(heat_map, (w, h))
# convert heat map to image
hm_img = heat_map_to_img(heat_map)
# set threshold at % of 255
limit = int(perc_thresh * 255)
ret, thresh = cv2.threshold(hm_img, limit, 255, 0)
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
if bound_type == "contour":
bound_img = cv2.drawContours(image, contours, -1, (0, 0, 255), 10)
elif bound_type == "box":
bound_img = image
for c in contours:
# fit with rotated rectangle
            # rect is (center (x, y), (width, height), angle of rotation)
rect = cv2.minAreaRect(c)
            # if the rotated rectangle's angle is within 5 degrees, draw a normal (axis-aligned) rectangle
if abs(rect[2]) < 5:
x, y, w, h = cv2.boundingRect(c)
# reject small samples; local fluctuations
if w * h > 900:
bound_img = cv2.rectangle(
bound_img, (x, y), (x + w, y + h), (160, 101, 179), 10)
else:
w, h = rect[1]
# reject small samples; local fluctuations
if w * h > 900:
box = cv2.boxPoints(rect)
box = np.int0(box)
bound_img = cv2.drawContours(
bound_img, [box], 0, (160, 101, 179), 10)
return bound_img
def plot_heat_map(image, heat_map, bounding_box=None, bound_type="box", save_as=""):
"""Overlay an image with the specified heat map or bounding box and plot the result.
Parameters
----------
image : np.array
The image that is plotted.
heat_map : np.array
The heat map put on top of the image
bounding_box: bool
The boolean to specify whether to use a bounding box or not
    bound_type: str
        The string used to specify whether to use a "box" or "contour" for bounding.
    save_as: str
        Path to save the figure to. If empty (the default), the plot is shown on screen instead of being saved.
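
    Example
    -------
    A minimal sketch (illustrative only; assumes ``chess_board`` and ``chess_board_hm`` from the module-level
    example):

    >>> plot_heat_map(chess_board, chess_board_hm, save_as="chess_board_hm.png")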
"""
height, width, _ = image.shape
hm = cv2.resize(heat_map, (width, height), interpolation=cv2.INTER_NEAREST)
plt.figure(figsize=(10, 10))
RGB_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if bounding_box:
bound_img = bounded_image(RGB_img, hm, bound_type=bound_type)
plt.imshow(bound_img, origin="upper", aspect='equal')
else:
plt.imshow(RGB_img)
plt.imshow(hm,
cmap=plt.cm.viridis,
alpha=.6,
interpolation='bilinear',
vmin=0,
vmax=1,
origin="upper",
aspect='equal')
cbar = plt.colorbar()
# needed to fix striations that appear in color bar with assumed alpha
# level
cbar.set_alpha(1)
cbar.draw_all()
plt.xticks([])
plt.yticks([])
if save_as == "":
plt.show()
else:
plt.savefig(save_as, bbox_inches='tight', pad_inches=0)
plt.clf()
| null |
hwdetect/visualization/heat_map.py
|
heat_map.py
|
py
| 11,006 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sampler.RandomGrid",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "interpolation.NearestNeighbour",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sampler.sample",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.mgrid",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "cv2.minAreaRect",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "cv2.boxPoints",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "numpy.int0",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_NEAREST",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
}
] |
140063126
|
import os
import platform
import re
from datetime import datetime
import constants
def nextAccountNumber():
last = max((i for i in constants.acctMap.items() if i[1].isdigit()), key=lambda x: int(x[1]))
last = "{:0>6}".format(int(last[1])+1)
return last
def buildOfficeAllyURL(month, day, year, office=""):
return "https://pm.officeally.com/pm/Appointments/ViewAppointments.aspx?Tab=A&View=d&Day={}&Month={}&Year={}&ProviderID=&OfficeID={}&StatusID=&TimeInterval=30&DailyMode=".format(day, month, year, office)
def valiDate(possibleDateString):
regex = re.compile(r'((?:^|(?<=\D))(\d{1,2})[/\s\-\.](\d{1,2})[/\s\-\.](\d{2,4})(?:$|(?=\D)))')
if len(regex.findall(possibleDateString)) > 0:
return True
return False
def parseDate(dateString):
regex = re.compile(r'((?:^|(?<=\D))(\d{1,2})[/\s\-\.](\d{1,2})[/\s\-\.](\d{2,4})(?:$|(?=\D)))')
dates = [date[0].replace("-", ".") for date in regex.findall(dateString)]
result = []
for date in dates:
try:
return datetime.strptime(date, "%m/%d/%Y").date()
except ValueError:
return datetime.strptime(date, "%m.%d.%Y").date()
return result
def clearEmptyCaptures(matches):
return [x for y in matches for x in y if x != ""]
def validateClaim(possibleClaimString, ssn=""):
regex = re.compile(r'(?:(?:^|(?<=[\D\s]))(\d*[A-Za-z\-]+\d*[A-Za-z\-]*\d*)(?:$|(?=\s)))|(?:^(?:claim|cl))[#:\s]*([\w\-]*\d[\w\-]*)|(?:^|(?<=\D))(\d{0,5})(?:$|(?=\D))|(?:^|(?<=\D))(\d{10,})(?:$|(?=\D))|^(?!panel)(?:\w[^\d])*[#:\s]*([02-9](?:\d){6,7})(?:$|(?=\D))|(?:^|(?<=\D))(((?!'+ssn+')\d){9})(?:$|(?=\D))', re.I)
matches = clearEmptyCaptures(regex.findall(possibleClaimString))
if len(matches) > 0 and any([match.replace("-","") != ssn for match in matches]):
return True
return False
def parseClaim(claimString, ssn=" "):
regex = re.compile(r'(?:(?:^|(?<=[\D\s]))(\d*[A-Za-z\-]+\d*[A-Za-z\-]*\d*)(?:$|(?=\s)))|(?:^(?:claim|cl))[#:\s]*([\w\-]*\d[\w\-]*)|(?:^|(?<=\D))(\d{0,5})(?:$|(?=\D))|(?:^|(?<=\D))(\d{10,})(?:$|(?=\D))|^(?!panel)(?:\w[^\d])*[#:\s]*([02-9](?:\d){6,7})(?:$|(?=\D))|(?:^|(?<=\D))(((?!'+ssn+')\d){9})(?:$|(?=\D))', re.I)
matches = clearEmptyCaptures(regex.findall(claimString))
approvedMatches= []
for match in matches:
if "panel" not in match.lower():
approvedMatches.append(match)
if len(approvedMatches) > 0:
return approvedMatches[0] if approvedMatches[0].replace("-","") != ssn else None
else:
return None
def orderedSort(li, order, key=lambda x: x):
ordered = []
keys = [key(x) for x in li]
for i in order:
assert i in keys
ordered.append(li[keys.index(i)])
nonOrdered = sorted((i for i in li if i not in ordered), key=key)
return ordered + nonOrdered
def getAuth(site):
    # each line of ./auth is assumed to be of the form "site:credential"
    with open("./auth") as file:
        for line in file:
            parts = line.strip().split(":", 1)
            if len(parts) == 2 and parts[0] == site:
                return parts[1]
    raise ValueError("No auth for "+site)
def pause(message):
return input(message)
| null |
utils.py
|
utils.py
|
py
| 3,089 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "constants.acctMap.items",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "constants.acctMap",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 44,
"usage_type": "attribute"
}
] |
627859833
|
import direct.directbase.DirectStart
from direct.gui.OnscreenText import OnscreenText
from direct.gui.DirectGui import *
from panda3d.core import *
#add some text
bk_text = "This is my Demo"
textObject = OnscreenText(text = bk_text, pos = (0.95,-0.95),
scale = 0.07,fg=(1,0.5,0.5,1),align=TextNode.ACenter,mayChange=1)
#callback function to set text
def setText(textEntered):
textObject.setText(textEntered)
#clear the text
def clearText():
b.enterText('')
#add a text entry field
b = DirectEntry(text = "" ,scale=.05,command=setText,
initialText="Type Something", numLines = 2,focus=1,focusInCommand=clearText)
#run the tutorial
base.run()
| null |
testingUserInput.py
|
testingUserInput.py
|
py
| 644 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "direct.gui.OnscreenText.OnscreenText",
"line_number": 8,
"usage_type": "call"
}
] |
120718092
|
"""
Utility functions shared among the mod maker and prior generation code.
Compatibility notes:
* We have found that Pandas version 0.20 is incompatible with this module. Pandas >= 0.24 works.
"""
from __future__ import print_function, division
import datetime as dt
from collections import OrderedDict
from datetime import timedelta
import numpy
from dateutil.relativedelta import relativedelta
import netCDF4 as ncdf
import numpy as np
from numpy import ma
import os
import pandas as pd
import re
from numpy import arctan, tan, sin, cos
from scipy.interpolate import interp1d, interp2d
import subprocess
import sys
from . import mod_constants as const
from .mod_constants import days_per_year
from .ggg_logging import logger
_std_model_pres_levels = np.array([1000.0, 975.0, 950.0, 925.0, 900.0, 875.0, 850.0, 825.0, 800.0, 775.0, 750.0, 725.0,
700.0, 650.0, 600.0, 550.0, 500.0, 450.0, 400.0, 350.0, 300.0, 250.0, 200.0, 150.0,
100.0, 70.0, 50.0, 40.0, 30.0, 20.0, 10.0, 7.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.7, 0.5,
0.4, 0.3, 0.1])
# as of 2019-03-14, both GEOS-FP and MERRA-2 use the same standard 42 pressure levels
# (c.f. https://gmao.gsfc.nasa.gov/GMAO_products/documents/GEOS_5_FP_File_Specification_ON4v1_2.pdf page 52 and
# https://gmao.gsfc.nasa.gov/reanalysis/MERRA/docs/MERRA_File_Specification.pdf page 27 for GEOS-FP and MERRA-2
# respectively)
merra_pres_levels = _std_model_pres_levels
geosfp_pres_levels = _std_model_pres_levels
earth_radius = 6371 # kilometers
class TropopauseError(Exception):
"""
Error if could not find the tropopause
"""
pass
class ModelError(Exception):
"""
Error if a model file is nonsensical.
"""
pass
class GGGPathError(Exception):
pass
class ProgressBar(object):
"""
Create a text-based progress bar
An instance of this class can be used to print a text progress bar that does not need a new line for each progress
step. It uses carriage returns to reset to the beginning of each line before printing the next. This therefore
does not work well if other print statements occur in between calls to :meth:`print_bar`, the progress bar will
either end up on a new line anyway or potentially overwrite previous print statements if they did not end with a
newline.
:param num_symbols: how many steps there should be in the progress bar. In other words, the progress bar will be
complete when :meth:`print_bar` is called with ``num_symbols-1``.
:type num_symbols: int
:param prefix: a string to include before the beginning of each progress bar. The class will ensure that at least
one space is present between the prefix and the progress bar, but will not add one if one is already present at
the end of the prefix.
:type prefix: str
:param suffix: a string to include at the end of each progress bar. The class will ensure that at least one space
is present between the progress bar and the suffix, but will not add one if one is already present at the beginning
of the suffix.
:type suffix: str
:param add_one: if ``True``, the number of symbols printed in the progress bar is equal to ``i+1`` where ``i`` is
the argument to :meth:`print_bar`. This works well with Python loops over ``i in range(n)``, since the last value
of ``i`` will be ``n-1``, setting ``add_one`` to ``True`` ensures that a full progress bar is printed at the end.
:type add_one: bool
:param style: can be either '*' or 'counter'. The former prints a symbolic progress bar of the form:
[* ]
[** ]
[*** ]
[****]
where the number of *'s is set by ``num_symbols``. The latter will instead print 'i/num_symbols' for each step.
:type style: str
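
    Example (an illustrative sketch of typical usage)::

        pbar = ProgressBar(10, prefix='Working', style='counter')
        for i in range(10):
            pbar.print_bar(i)
        pbar.finish()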
"""
def __init__(self, num_symbols, prefix='', suffix='', add_one=True, style='*'):
"""
See class help.
"""
if len(prefix) > 0 and not prefix.endswith(' '):
prefix += ' '
if len(suffix) > 0 and not suffix.startswith(' '):
suffix = ' ' + suffix
if style == '*':
self._fmt_str = '{pre}[{{pstr:<{n}}}]{suf}'.format(pre=prefix, n=num_symbols, suf=suffix)
elif style == 'counter':
self._fmt_str = '{pre}{{i:>{l}}}/{n}{suf}'.format(pre=prefix, n=num_symbols, suf=suffix, l=len(str(num_symbols)))
else:
raise ValueError('style "{}" not recognized'.format(style))
self._add_one = add_one
def print_bar(self, i):
"""
Print the iteration of the progress bar corresponding to step ``i``.
:param i: defines the progress step, either the number of *'s to print with ``style='*'`` or the counter number
with ``style='counter'``.
:type i: int
:return: None, prints to screen.
"""
if self._add_one:
i += 1
pstr = '*' * i
pbar = self._fmt_str.format(pstr=pstr, i=i)
sys.stdout.write('\r' + pbar)
sys.stdout.flush()
def finish(self):
"""
Close the progress bar. By default, just prints a newline.
:return: None
"""
sys.stdout.write('\n')
sys.stdout.flush()
def check_depedencies_newer(out_file, *dependency_files):
"""
    Check if any of the dependency files are newer than the output file.
    :param out_file: the output file whose modification time is compared against the dependencies.
    :param dependency_files: one or more files that ``out_file`` depends on.
    :return: ``True`` if any dependency file was modified more recently than ``out_file``, ``False`` otherwise.
    :rtype: bool
"""
if len(dependency_files) == 0:
raise ValueError('Give at least one dependency file')
out_last_modified = os.path.getmtime(out_file)
for dep in dependency_files:
if os.path.getmtime(dep) > out_last_modified:
return True
return False
def get_num_header_lines(filename):
"""
Get the number of header lines in a standard GGG file
This assumes that the file specified begins with a line with two numbers: the number of header rows and the number
of data columns.
:param filename: the file to read
:type filename: str
:return: the number of header lines
:rtype: int
"""
with open(filename, 'r') as fobj:
header_info = fobj.readline()
return int(header_info.split()[0])
def _write_header(fobj, header_lines, n_data_columns, file_mode='w'):
line1 = ' {} {}\n'.format(len(header_lines)+1, n_data_columns)
fobj.write(line1)
header_lines = [l if l.endswith('\n') else l + '\n' for l in header_lines]
fobj.writelines(header_lines)
def read_mod_file(mod_file, as_dataframes=False):
"""
Read a TCCON .mod file.
:param mod_file: the path to the mod file.
:type mod_file: str
:param as_dataframes: if ``True``, then the collection of variables will be kept as dataframes. If ``False``
(default), they are converted to dictionaries of floats or numpy arrays.
:type as_dataframes: bool
:return: a dictionary with keys 'file' (values derived from file name), 'constants' (constant values stored in the
.mod file header), 'scalar' (values like surface height and tropopause pressure that are only defined once per
profile) and 'profile' (profile variables) containing the respective variables. These values will be dictionaries
or data frames, depending on ``as_dataframes``.
:rtype: dict
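
    Example (illustrative only; the file name is hypothetical)::

        mod_data = read_mod_file('FPIT_20180101_0000Z_46N_090W.mod')
        sorted(mod_data.keys())                       # ['constants', 'file', 'profile', 'scalar']
        mod_data['constants']['tropopause_pressure']  # a single float read from the header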
"""
n_header_lines = get_num_header_lines(mod_file)
# Read the constants from the second line of the file. There's no header for these, we just have to rely on the
# same constants being in the same position.
constant_vars = pd.read_csv(mod_file, sep='\s+', header=None, nrows=1, skiprows=1,
names=('earth_radius', 'ecc2', 'obs_lat', 'surface_gravity',
'profile_base_geometric_alt', 'base_pressure', 'tropopause_pressure'))
# Read the scalar variables (e.g. surface pressure, SZA, tropopause) first. We just have to assume their headers are
# on line 3 and values on line 4 of the file, the first number in the first line gives us the line the profile
# variables start on.
scalar_vars = pd.read_csv(mod_file, sep='\s+', header=2, nrows=1)
# Now read the profile vars.
profile_vars = pd.read_csv(mod_file, sep='\s+', header=n_header_lines-1)
# Also get the information that's only in the file name (namely date and longitude, we'll also read the latitude
# because it's there).
file_vars = dict()
base_name = os.path.basename(mod_file)
file_vars['datetime'] = find_datetime_substring(base_name, out_type=dt.datetime)
file_vars['lon'] = find_lon_substring(base_name, to_float=True)
file_vars['lat'] = find_lat_substring(base_name, to_float=True)
# Check that the header latitude and the file name latitude don't differ by more than 0.5 degree. Even if rounded
# to an integer for the file name, the difference should not exceed 0.5 degree.
lat_diff_threshold = 0.5
if np.abs(file_vars['lat'] - constant_vars['obs_lat'].item()) > lat_diff_threshold:
raise ModelError('The latitude in the file name and .mod file header differ by more than {lim} deg ({name} vs. '
'{head}). This indicates a possibly malformed .mod file.'
.format(lim=lat_diff_threshold, name=file_vars['lat'], head=constant_vars['obs_lat'].item())
)
out_dict = dict()
if as_dataframes:
out_dict['file'] = pd.DataFrame(file_vars)
out_dict['constants'] = constant_vars
out_dict['scalar'] = scalar_vars
out_dict['profile'] = profile_vars
else:
out_dict['file'] = file_vars
out_dict['constants'] = {k: v.item() for k, v in constant_vars.items()}
out_dict['scalar'] = {k: v.item() for k, v in scalar_vars.items()}
out_dict['profile'] = {k: v.values for k, v in profile_vars.items()}
return out_dict
def datetime_from_mod_filename(mod_file):
dstr = re.search(r'\d{8}_\d{4}Z', os.path.basename(mod_file)).group()
return dt.datetime.strptime(dstr, '%Y%m%d_%H%MZ')
def read_map_file(map_file, as_dataframes=False, skip_header=False):
"""
Read a .map file
:param map_file: the path to the .map file
:type map_file: str
:param as_dataframes: set to ``True`` to return the constants and profiles data as Pandas dataframes. By default,
(``False``) they are returned as dictionaries of numpy arrays.
:type as_dataframes: bool
    :param skip_header: set to ``True`` to avoid reading the header. This is helpful for reading older .map files that
have a slightly different header format.
:type skip_header: bool
:return: a dictionary with keys 'constants' and 'profile' that hold the header values and main profile data,
respectively. The form of these values depends on ``as_dataframes``.
:rtype: dict
"""
n_header_lines = get_num_header_lines(map_file)
constants = dict()
if not skip_header:
with open(map_file, 'r') as mapf:
n_skip = 4
# Skip the first four lines to get to the constants - these should be (1) the number of header lines &
# columns, (2) filename, (3) version info, and (4) wiki reference.
for i in range(n_skip):
mapf.readline()
# The last two lines of the header are the column names and units; everything between line 5 and that should
# be physical constants. Start at n_skip+1 to account for 0 indexing vs. number of lines.
for i in range(n_skip+1, n_header_lines-1):
line = mapf.readline()
# Lines have the form Name (units): value - ignore anything in parentheses
name, value = line.split(':')
name = re.sub(r'\(.+\)', '', name).strip()
constants[name] = float(value)
df = pd.read_csv(map_file, header=n_header_lines-2, skiprows=[n_header_lines-1], na_values='NAN')
# Sometimes extra space gets kept in the headers - remove that
df.rename(columns=lambda h: h.strip(), inplace=True)
if not as_dataframes:
data = {k: v.values for k, v in df.items()}
else:
data = df
out_dict = dict()
out_dict['constants'] = constants
out_dict['profile'] = data
return out_dict
def read_integral_file(integral_file, as_dataframes=False):
"""
Read an integral file that defines an altitude grid for GGG
:param integral_file: the path to the integral file
:type integral_file: str
:param as_dataframes: if ``True``, the information in the file is returned as a single dataframe. If ``False``, it
is returned as a dict of numpy arrays.
:type as_dataframes: bool
:return: the table of altitudes and mean molecular weights.
:rtype: :class:`pandas.DataFrame` or dict
"""
df = pd.read_csv(integral_file, sep=r'\s+', header=None, names=['Height', 'mmw'])
if as_dataframes:
return df
else:
return {k: v.to_numpy() for k, v in df.items()}
def read_isotopes(isotopes_file, gases_only=False):
"""
Read the isotopes defined in an isotopologs.dat file
:param isotopes_file: the path to the isotopologs.dat file
:type isotopes_file: str
:param gases_only: set to ``True`` to return a tuple of only the distinct gases, not the individual isotopes.
Default is ``False``, which includes the different isotope numbers.
:type gases_only: bool
:return: tuple of isotope or gas names
:rtype: tuple(str)
"""
nheader = get_num_header_lines(isotopes_file)
with open(isotopes_file, 'r') as fobj:
for i in range(nheader):
fobj.readline()
isotopes = []
for line in fobj:
iso_number = line[3:5].strip()
iso_name = line[6:14].strip()
if not gases_only:
iso_name = iso_number + iso_name
if iso_name not in isotopes:
isotopes.append(iso_name)
return tuple(isotopes)
def get_isotopes_file(isotopes_file=None, use_gggpath=False):
"""
Get the path to the isotopologs.dat file
:param isotopes_file: user input path. If this is not None, it is returned after checking that it exists.
:type isotopes_file: str or None
:param use_gggpath: set to ``True`` to find the isotopologs.dat file at the location defined by the GGGPATH
    environmental variable. If ``False`` and ``isotopes_file`` is ``None`` then the isotopologs.dat file included in
this repo is used.
:type use_gggpath: bool
:return: the path to the isotopologs.dat file
:rtype: str
"""
if isotopes_file is not None:
if not os.path.isfile(isotopes_file):
raise IOError('The isotopes path {} is not a file'.format(isotopes_file))
return isotopes_file
elif use_gggpath:
gggpath = os.getenv('GGGPATH')
if gggpath is None:
raise EnvironmentError('use_gggpath=True requires the GGGPATH environmental variable to be set')
isotopes_file = os.path.join(gggpath, 'isotopologs', 'isotopologs.dat')
if not os.path.isfile(isotopes_file):
raise IOError('Failed to find isotopologs.dat at {}. Either update your GGGPATH environmental variable or '
'set use_gggpath to False.'.format(isotopes_file))
return isotopes_file
else:
return os.path.join(const.data_dir, 'isotopologs.dat')
def map_file_name(site_abbrev, obs_lat, obs_date):
return '{}{}_{}.map'.format(site_abbrev, format_lat(obs_lat, prec=0), obs_date.strftime('%Y%m%d_%H%M'))
def write_map_file(map_file, site_lat, trop_eqlat, prof_ref_lat, surface_alt, tropopause_alt, strat_used_eqlat,
variables, units, var_order=None, req_all_vars=False, converters=None):
"""
Create a .map file
:param map_file: the full name to save the map file as
:type map_file: str
:param site_lat: the geographic latitude of the site.
:type site_lat: float
:param trop_eqlat: the equivalent latitude, derived from the GEOS lat vs. theta climatology, used to create the
tropospheric part of the profiles.
:type trop_eqlat: float
:param prof_ref_lat: the constant reference latitude used for the tropospheric age of air and seasonal cycle
functions.
:type prof_ref_lat: float
:param surface_alt: the surface altitude from the .mod file in kilometers
:type surface_alt: float
:param tropopause_alt: the altitude of the tropopause for this profile (in kilometers).
:type tropopause_alt: float
:param strat_used_eqlat: whether or not the stratospheric part of the profile used PV-derived equivalent latitude.
``False`` means that the geographic latitude of the site was used instead.
:type strat_used_eqlat: bool
:param variables: a dictionary where the keys will be used as the column names and the values should be 1D
array-like values to be written to the map file.
:type variables: dict(str: array-like)
:param units: a dictionary that must have the same keys as ``variables`` where the values define the units to print
in the line under the variable names in the .map file
:type units: dict(str: str)
:param var_order: optional, if given, a sequence of the keys in ``variables`` and ``units`` that defines what order
they are to be written to the .map file. If ``variables`` is an OrderedDict, then this is not necessary. May omit
keys from ``variables`` to skip writing those variables.
:type var_order: sequence(str)
:param req_all_vars: optional, set to ``True`` to require that all keys in ``variables`` are contained in
``var_order``.
:type req_all_vars: bool
:param converters: optional, a dictionary defining converter functions for different inputs. The keys must be keys
in ``variables`` and the values functions that accept one input, which will be a single value from that variable
(not the whole vector), and return a scalar numeric output.
:type converters: dict
:return: None
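
    Example (illustrative only; ``heights`` and ``temps`` stand in for 1D arrays of equal length)::

        variables = OrderedDict([('Height', heights), ('Temp', temps)])
        units = {'Height': 'km', 'Temp': 'K'}
        write_map_file('pa_20180101_0000.map', site_lat=45.94, trop_eqlat=47.2, prof_ref_lat=45.0,
                       surface_alt=0.44, tropopause_alt=11.5, strat_used_eqlat=True,
                       variables=variables, units=units)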
"""
def no_convert(val):
return val
# variables and units must have the same keys
if var_order is None:
var_order = list(variables.keys())
if req_all_vars and (set(var_order) != set(variables.keys()) or set(var_order) != set(units.keys())):
raise ValueError('variables and units must be dictionaries with the same keys, and both must match the '
'keys in var_order (if given)')
k1 = var_order[0]
size_check = np.size(variables[k1])
for k, v in variables.items():
if np.ndim(v) != 1:
raise ValueError('All values in variables must be 1 dimensional. {} is not.'.format(k))
elif np.size(v) != size_check:
raise ValueError('All values in variables must have the same shape. {badvar} has a different shape '
'({badshape}) than {chkvar} ({chkshape})'.format(badvar=k, badshape=np.shape(v),
chkvar=k1, chkshape=size_check))
converters = dict() if converters is None else converters
for k in var_order:
if k not in converters:
converters[k] = no_convert
header_lines = []
# Header line 2: file name (no path)
header_lines.append(os.path.basename(map_file))
# Header line 3: version info
hg_parent, hg_branch, hg_date = hg_commit_info()
header_lines.append('{pgrm:19} {vers:14} ({branch:19}) {date} {author:10}'
.format(pgrm='MOD_MAKER.py', vers=hg_parent, branch=hg_branch, date=hg_date, author='SR, MK, JL'))
# Header line 4: wiki link
header_lines.append('Please see https://tccon-wiki.caltech.edu for a complete description of this file and its usage.')
# Header line 5 to (n-2): constants/site lat
    header_lines.append('Avogadro (molecules/mole): {}'.format(const.avogadro))
header_lines.append('Mass_Dry_Air (kg/mole): {}'.format(const.mass_dry_air))
header_lines.append('Mass_H2O (kg/mole): {}'.format(const.mass_h2o))
header_lines.append('Latitude (degrees): {}'.format(site_lat))
header_lines.append('Trop. eqlat (degrees): {:.2f}'.format(trop_eqlat))
header_lines.append('Ref. lat (degrees): {}'.format(prof_ref_lat))
header_lines.append('Surface altitude (km): {}'.format(surface_alt))
header_lines.append('Tropopause (km): {}'.format(tropopause_alt))
header_lines.append('Stratosphere used eq lat: {}'.format(int(strat_used_eqlat)))
# Line 1: number of header lines and variable columns
# The number of header lines is however many we've made so far, plus this one, the column names, and the column
# units (3 extra)
header_lines.insert(0, '{} {}'.format(len(header_lines)+3, len(variables)))
# Go ahead and write the header to the file
with open(map_file, 'w') as mapf:
for line in header_lines:
mapf.write(line + '\n')
# Now we write the variable names, units, and values. Need to get a list of keys to make sure the order we
# iterate through them is the same
mapf.write(','.join(var_order) + '\n')
mapf.write(','.join(units[k] for k in var_order) + '\n')
# Finally write the values.
for i in range(size_check):
formatted_values = ['{:.6G}'.format(converters[k](variables[k][i])) for k in var_order]
mapf.write(','.join(formatted_values))
if i < size_check - 1:
mapf.write('\n')
def vmr_file_name(obs_date, lon, lat, keep_latlon_prec=False):
"""
Construct the standard filename for a .vmr file produced by this code
:param obs_date: the datetime of the profiles
:type obs_date: datetime-like
:param lon: the longitude of the profiles.
:type lon: float
:param lat: the latitude of the profiles
:type lat: float
:param keep_latlon_prec: by default, lat and lon are rounded to the nearest whole number. Set this to ``True`` to
keep 2 decimal places of precision.
:type keep_latlon_prec: bool
:return: the .vmr file name, with format "JLv_yyyymmddhh_XX[NS]_YYY[EW].vmr" where "v" is the major version,
"yyyymmddhh" the date/time, XX[NS] the latitude and YYY[EW] the longitude.
:rtype: str
"""
prec = 2 if keep_latlon_prec else 0
lat = format_lat(lat, prec=prec)
lon = format_lon(lon, prec=prec, zero_pad=True)
major_version = const.priors_version.split('.')[0]
return 'JL{ver}_{date}_{lat}_{lon}.vmr'.format(ver=major_version, date=obs_date.strftime('%Y%m%d%H'),
lat=lat, lon=lon)
def write_vmr_file(vmr_file, tropopause_alt, profile_date, profile_lat, profile_alt, profile_gases, gas_name_order=None):
"""
    Write a new-style .vmr file (without seasonal cycle, secular trends, and latitudinal gradients).
:param vmr_file: the path to write the .vmr file ar
:type vmr_file: str
:param tropopause_alt: the altitude of the tropopause, in kilometers
:type tropopause_alt: float
:param profile_date: the date of the profile
:type profile_date: datetime-like
:param profile_lat: the latitude of the profile (south is negative)
:type profile_lat: float
:param profile_alt: the altitude levels that the profiles are defined on, in kilometers
:type profile_alt: array-like
:param profile_gases: a dictionary of the prior profiles to write to the .vmr file.
:type profile_gases: dict(array)
:param gas_name_order: optional, a list/tuple specifying what order the gases are to be written in. If not given,
they will be written in whatever order the iteration through ``profile_gases`` defaults to. If given, then an
error is raised if any of the gas names listed here are not present in ``profile_gases`` (comparison is case-
insensitive). Any gases not listed here that are in ``profile_gases`` are skipped.
:type gas_name_order: list(str)
:return: none, writes the .vmr file.
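
    Example (illustrative only; the gas values are made up)::

        alt = np.arange(0.0, 71.0)  # km
        gases = {'co2': np.full(alt.shape, 400.0e-6), 'ch4': np.full(alt.shape, 1.8e-6)}
        write_vmr_file('JL1_2018010100_46N_090W.vmr', tropopause_alt=11.5,
                       profile_date=dt.datetime(2018, 1, 1), profile_lat=45.94,
                       profile_alt=alt, profile_gases=gases)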
"""
if np.ndim(profile_alt) != 1:
raise ValueError('profile_alt must be 1D')
if gas_name_order is None:
gas_name_order = [k for k in profile_gases.keys()]
gas_name_order_lower = [name.lower() for name in gas_name_order]
gas_name_mapping = {k: None for k in gas_name_order}
# Check that all the gases in the profile_gases dict are expected to be written.
for gas_name, gas_data in profile_gases.items():
if gas_name.lower() not in gas_name_order_lower:
logger.warning('Gas "{}" was not listed in the gas name order and will not be written to the .vmr '
'file'.format(gas_name))
elif np.shape(gas_data) != np.shape(profile_alt):
raise ValueError('Gas "{}" has a different shape ({}) than the altitude data ({})'.format(
gas_name, np.shape(gas_data), np.shape(profile_alt)
))
elif np.ndim(gas_data) != 1:
raise ValueError('Gas "{}" is not 1D'.format(gas_name))
else:
idx = gas_name_order_lower.index(gas_name.lower())
gas_name_mapping[gas_name_order[idx]] = gas_name
# Write the header, which starts with the number of header lines and data columns, then has the tropopause altitude,
# profile date as a decimal year, and profile latitude. I'm going to skip the secular trends, seasonal cycle, and
# latitude gradient because those are not necessary.
alt_fmt = '{:9.3f} '
gas_fmt = '{:.3E} '
table_header = ['Altitude'] + ['{:10}'.format(name) for name in gas_name_order]
header_lines = [' ZTROP_VMR: {:.1f}'.format(tropopause_alt),
' DATE_VMR: {:.3f}'.format(date_to_decimal_year(profile_date)),
' LAT_VMR: {:.2f}'.format(profile_lat),
' '.join(table_header)]
with open(vmr_file, 'w') as fobj:
_write_header(fobj, header_lines, len(gas_name_order) + 1)
for i in range(np.size(profile_alt)):
fobj.write(alt_fmt.format(profile_alt[i]))
for gas_name in gas_name_order:
if gas_name_mapping[gas_name] is not None:
gas_conc = profile_gases[gas_name_mapping[gas_name]][i]
else:
gas_conc = 0.0
fobj.write(gas_fmt.format(gas_conc))
fobj.write('\n')
def read_vmr_file(vmr_file, as_dataframes=False, lowercase_names=True, style='new'):
    """Read a .vmr file; returns a dict with keys 'scalar', 'profile', and 'prior_info'."""
    nheader = get_num_header_lines(vmr_file)
if style == 'new':
last_const_line = nheader - 1
old_style = False
elif style == 'old':
last_const_line = 4
old_style = True
else:
raise ValueError('style must be one of "new" or "old"')
header_data = dict()
with open(vmr_file, 'r') as fobj:
# Skip the line with the number of header lines and columns
fobj.readline()
for i in range(1, last_const_line):
line = fobj.readline()
const_name, const_val = [v.strip() for v in line.split(':')]
if lowercase_names:
const_name = const_name.lower()
header_data[const_name] = float(const_val)
prior_info = dict()
if old_style:
for i in range(last_const_line, nheader-1, 2):
category_line = fobj.readline()
category = re.split(r'[:\.]', category_line)[0].strip()
data_line = fobj.readline()
data_line = data_line.split(':')[1].strip()
split_data_line = re.split(r'\s+', data_line)
prior_info[category] = np.array([float(x) for x in split_data_line])
data_table = pd.read_csv(vmr_file, sep='\s+', header=nheader-1)
if lowercase_names:
data_table.columns = [v.lower() for v in data_table]
if as_dataframes:
header_data = pd.DataFrame(header_data, index=[0])
# Rearrange the prior info dict so that the data frame has the categories as the index and the species as the
# columns.
categories = list(prior_info.keys())
tmp_prior_info = dict()
for i, k in enumerate(data_table.columns.drop('altitude')):
tmp_prior_info[k] = np.array([prior_info[cat][i] for cat in categories])
prior_info = pd.DataFrame(tmp_prior_info, index=categories)
else:
# use an ordered dict to ensure we keep the order of the gases. This is important if we use this .vmr file as
# a template to write another .vmr file that gsetup.f can read.
data_table = OrderedDict([(k, v.to_numpy()) for k, v in data_table.items()])
return {'scalar': header_data, 'profile': data_table, 'prior_info': prior_info}
def format_lon(lon, prec=2, zero_pad=False):
"""
Convert longitude between string and numeric representations.
If ``lon`` is a number, then it is converted to a string. The string will be the absolute value with "W" or "E" at
the end to indicate west (<= 0) or east (> 0). If given a string in that format, it converts it to a number.
:param lon: the longitude to convert
:type lon: float or str
:param prec: the precision after the decimal point to use. Only has an effect when converting float to string.
:type prec: int
:param zero_pad: set to ``True`` to zero pad the longitude string so that there are 3 digits before the decimal
place. Only has an effect when converting float to string.
:return: the formatted longitude string or the float representation of the longitude, with west being negative.
:rtype: str or float
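
    Example (illustrative)::

        format_lon(-105.24)    # '105.24W'
        format_lon(254.76)     # '105.24W' (longitudes > 180 are wrapped into the -180 to +180 range)
        format_lon('105.24W')  # -105.24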
"""
def to_str(lon):
ew = 'E' if lon > 0 else 'W'
# In Python float format specification, "0X.Y" means to zero pad so that there's X total characters and Y after
# the decimal point. We want lon zero padded to have three numbers before the decimal point, so the total width
# needs to be the precision + 4 if there will be a decimal point otherwise just 3.
width = prec + 4 if prec > 0 else prec + 3
pad = '0{}'.format(width) if zero_pad else ''
fmt_str = '{{:{padding}.{prec}f}}{{}}'.format(padding=pad, prec=prec)
return fmt_str.format(abs(lon), ew)
def to_float(lon):
if lon[-1] == 'E':
sign = 1
elif lon[-1] == 'W':
sign = -1
else:
raise ValueError('A longitude string must end in "E" or "W"')
return float(lon[:-1]) * sign
if isinstance(lon, str):
return to_float(lon)
else:
if lon > 180:
lon -= 360
return to_str(lon)
def find_lon_substring(string, to_float=False):
"""
Find a longitude substring in a string.
A longitude substring will match \d+[EW] or \d+\.\d+[EW].
:param string: the string to search for the longitude substring
:type string: str
:param to_float: when ``True``, converts the longitude to a float value using :func:`format_lon`, else returns the
string itself.
:type to_float: bool
:return: the longitude substring or float value
:rtype: str or float
"""
# search for one or more numbers, which may include a decimal point followed by at least one number then E or W.
lon_re = r'\d+(\.\d+)?[EW]'
lon_str = re.search(lon_re, string).group()
if to_float:
return format_lon(lon_str)
else:
return lon_str
def format_lat(lat, prec=2, zero_pad=False):
"""
Convert latitude between string and numeric representations.
If ``lat`` is a number, then it is converted to a string. The string will be the absolute value with "N" or "S" at
    the end to indicate north (> 0) or south (<= 0). If given a string in that format, it converts it to a number.
:param lat: the latitude to convert
:type lat: float or str
:param prec: the precision after the decimal point to use. Only has an effect when converting float to string.
:type prec: int
:param zero_pad: set to ``True`` to zero pad the latitude string so that there are 2 digits before the decimal
place. Only has an effect when converting float to string.
:return: the formatted latitude string or the float representation of the latitude, with south being negative.
:rtype: str or float
"""
def to_str(lat):
ns = 'N' if lat > 0 else 'S'
# In Python float format specification, "0X.Y" means to zero pad so that there's X total characters and Y after
# the decimal point. We want lat zero padded to have two numbers before the decimal point, so the total width
# needs to be the precision + 3 if there will be a decimal point otherwise just 2.
width = prec + 3 if prec > 0 else prec + 2
pad = '0{}'.format(width) if zero_pad else ''
fmt_str = '{{:{padding}.{prec}f}}{{}}'.format(padding=pad, prec=prec)
return fmt_str.format(abs(lat), ns)
def to_float(lat):
if lat[-1] == 'N':
sign = 1
elif lat[-1] == 'S':
sign = -1
else:
raise ValueError('A latitude string must end in "N" or "S"')
return float(lat[:-1]) * sign
if isinstance(lat, str):
return to_float(lat)
else:
return to_str(lat)
def find_lat_substring(string, to_float=False):
"""
Find a latitude substring in a string.
A latitude substring will match \d+[NS] or \d+\.\d+[NS].
:param string: the string to search for the latitude substring
:type string: str
:param to_float: when ``True``, converts the latitude to a float value using :func:`format_lat`, else returns the
string itself.
:type to_float: bool
:return: the latitude substring or float value
:rtype: str or float
"""
# search for one or more numbers, which may include a decimal point followed by at least one number then N or S.
lat_re = r'\d+(\.\d+)?[NS]'
lat_str = re.search(lat_re, string).group()
if to_float:
return format_lat(lat_str)
else:
return lat_str
def find_datetime_substring(string, out_type=str):
"""
Extract a date/time substring from a string.
This assumes that the date/time is formatted as %Y%m%d (YYYYMMDD) or %Y%m%d_%H%M (YYYYMMDD_hhmm).
:param string: the string to search for the date/time substring.
:type string: str
:param out_type: what type to return the date/time as. Default is to return the string. If another type is passed,
then it must have a ``strptime`` class method that accepts the string to parse and the format string as arguments,
i.e. it must behave like :func:`datetime.datetime.strptime`.
:type out_type: type
:return: the string or parsed datetime value
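
    Example (illustrative; the file name is hypothetical)::

        find_datetime_substring('FPIT_20180101_1200Z.mod')                        # '20180101_1200'
        find_datetime_substring('FPIT_20180101_1200Z.mod', out_type=dt.datetime)  # datetime(2018, 1, 1, 12, 0)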
"""
date_re = r'\d{8}(_\d{4})?'
date_str = re.search(date_re, string).group()
if out_type is str:
return date_str
else:
date_fmt = '%Y%m%d' if len(date_str) == 8 else '%Y%m%d_%H%M'
return out_type.strptime(date_str, date_fmt)
def _hg_dir_helper(hg_dir):
if hg_dir is None:
hg_dir = os.path.dirname(__file__)
return os.path.abspath(hg_dir)
def hg_commit_info(hg_dir=None):
hg_dir = _hg_dir_helper(hg_dir)
if len(hg_dir) == 0:
# If in the current directory, then dirname(__file__) gives an empty string, which isn't allowed as the argument
# to cwd in check_output
hg_dir = '.'
# Get the last commit (-l 1) in the current branch (-f)
summary = subprocess.check_output(['hg', 'log', '-f', '-l', '1'], cwd=hg_dir).splitlines()
log_dict = dict()
# Since subprocess returns a bytes object (at least on Linux) rather than an encoded string object, all the strings
# below must be bytes, not unicode strings
for line in summary:
splitline = line.split(b':', 1)
if len(splitline) < 2:
continue
k, v = splitline
log_dict[k.strip()] = v.strip()
parent = re.search(b'(?<=:)\\w+', log_dict[b'changeset']).group()
# In Mercurial, if on the default branch, then log does not include a branch name in the output
branch = log_dict[b'branch'] if b'branch' in log_dict else b'default'
parent_date = log_dict[b'date']
# Convert to unicode strings to avoid them getting formatted as "b'abc'" or "b'default'" in unicode strings
return parent.decode('utf8'), branch.decode('utf8'), parent_date.decode('utf8')
def hg_is_commit_clean(hg_dir=None, ignore_untracked=True, ignore_files=tuple()):
"""
Checks if a mercurial directory is clean.
By default, a directory is considered clean if all tracked files have no uncommitted changes. Untracked files are
not considered. Setting ``ignore_untracked`` to ``False`` means that there must be no untracked files for the
directory to be clean.
:param hg_dir: optional, the mercurial directory to check. If not given, defaults to the one containing this repo.
:type hg_dir: str
:param ignore_untracked: optional, set to ``False`` to require that there be no untracked files in the directory for
it to be considered clean.
    :type ignore_untracked: bool
    :param ignore_files: optional, a sequence of file paths whose changes are ignored when deciding whether the
        directory is clean.
    :type ignore_files: sequence(str)
:return: ``True`` if the directory is clean, ``False`` otherwise.
:rtype: bool
"""
hg_dir = _hg_dir_helper(hg_dir)
hg_root = subprocess.check_output(['hg', 'root'], cwd=hg_dir).strip()
summary = subprocess.check_output(['hg', 'status'], cwd=hg_dir).splitlines()
def in_ignore(f):
f = os.path.join(hg_root, f)
for ignore in ignore_files:
if os.path.exists(ignore) and os.path.samefile(f, ignore):
return True
return False
# Since subprocess returns a bytes object (at least on Linux) rather than an encoded string object, all the strings
# below must be bytes, not unicode strings
for line in summary:
status, hg_file = [p.strip() for p in line.split(b' ', 1)]
if ignore_untracked and status == b'?':
pass
elif in_ignore(hg_file):
pass
else:
return False
return True
def _lrange(*args):
# Ensure Python 3 compatibility for adding range() calls together
r = range(*args)
if not isinstance(r, list):
r = list(r)
return r
def round_to_zero(val):
sign = np.sign(val)
return np.floor(np.abs(val)) * sign
def calculate_model_potential_temperature(temp, pres_levels=_std_model_pres_levels):
"""
    Calculate potential temperature for model output on fixed pressure levels.
:param temp: The absolute temperature (in K) on the model grid.
:type temp: array-like
:param pres_levels: the pressure levels that the temperature is defined on. Must be a 1D vector, i.e. all columns in
the model must be on the same pressure levels. A standard set of pressure for GEOS FP is the default.
:type pres_levels: vector-like
:return: the potential temperature
"""
if temp.ndim != 4:
raise ValueError('temp expected to be 4D')
ntime, nlev, nlat, nlon = temp.shape
if nlev != pres_levels.size:
raise ValueError('Number of levels in temp != number of pressure levels defined')
pres = np.tile(pres_levels[np.newaxis, :, np.newaxis, np.newaxis], (ntime, 1, nlat, nlon))
return calculate_potential_temperature(pres, temp)
def calculate_potential_temperature(pres, temp):
"""
Calculate potential temperature.
:param pres: Pressure in millibars/hPa.
:param temp: Absolute temperature in Kelvin.
:return: the potential temperature corresponding to those T/P coordinates.
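
    Example (a quick numeric check of the formula)::

        calculate_potential_temperature(500.0, 250.0)  # ~304.8 K, i.e. 250 * (1000 / 500) ** 0.286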
"""
return temp * (1000/pres) ** 0.286
def convert_geos_eta_coord(delp):
"""
Calculate the pressure grid for a GEOS native file.
    :param delp: DELP (pressure thickness) array in Pa. May be any number of
dimensions, as long as exactly one has a length of 72.
:return: the pressure level midpoints, in hPa. Note the unit change, this
is because the GEOS DELP variable is usually in Pa, but hPa is the standard
unit for pressure levels in the Np files.
"""
dp_shape = np.array(delp.shape)
try:
i_ax = np.flatnonzero(dp_shape == 72).item()
except ValueError:
raise ValueError('delp is either missing its 72 level dimension or has multiple dimensions with length 72')
# From pg. 7 of the GEOS FP document (https://gmao.gsfc.nasa.gov/GMAO_products/documents/GEOS_5_FP_File_Specification_ON4v1_2.pdf)
# the top pressure is always 0.01 hPa. Since the columns are space-to-surface, we add the cumulative sum to get the
# level bottom pressure, then take the average along that axis to get the middle pressure
level_shape = dp_shape.copy()
level_shape[:i_ax+1] = 1
top_p = 0.01
top_p_slice = np.full(level_shape, top_p)
delp = delp * 0.01 # assume input is in Pa, want hPa
p_edge = top_p + np.cumsum(delp, axis=i_ax)
p_edge = np.concatenate([top_p_slice, p_edge], axis=i_ax)
# Move the vertical axis to the front to do the averaging so we can just always average along the first dimension
p_edge = np.rollaxis(p_edge, i_ax, 0)
p_mid = 0.5 * (p_edge[:-1] + p_edge[1:])
    # move the vertical axis back to its original position before returning
    return np.rollaxis(p_mid, 0, i_ax + 1)
def _construct_grid(*part_defs):
"""
Helper function to construct coordinates for a 1D grid
:param part_defs: a sequence of tuples (or lists) defining the start, stop, and step for each grid component. Each
tuple gets expanded as the arguments to :func:`numpy.arange`.
:return: the coordinates, sorted, and made sure to be unique
"""
grid_components = [np.arange(*part) for part in part_defs]
# Keep only unique values and sort them
return np.unique(np.concatenate(grid_components))
# Compute area of each grid cell and the total area
def calculate_area(lat, lon, lat_res=None, lon_res=None, muted=False):
"""
Calculate grid cell area for an equirectangular grid.
:param lat: the vector of grid cell center latitudes, in degrees
:param lon: the vector of grid cell center longitudes, in degrees
:param lat_res: the width of a single grid cell in the latitudinal direction, in degrees. If omitted, will be
calculated from the lat vector.
    :param lon_res: the width of a single grid cell in the longitudinal direction, in degrees. If omitted, will be
        calculated from the lon vector.
    :param muted: set to ``True`` to suppress the message printed when the total area must be renormalized to 4*pi.
    :return: 2D array of areas in steradians (normalized so that the total area of the sphere is 4*pi).
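
    Example (illustrative)::

        lat = np.arange(-89.5, 90.0, 1.0)  # 1-degree grid cell centers
        lon = np.arange(-180.0, 180.0, 1.0)
        area = calculate_area(lat, lon)    # area.shape == (180, 360), area.sum() is normalized to ~4*pi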
"""
def calculate_resolution(coord_vec, coord_name):
res = np.diff(coord_vec)
if not np.all(res - res[0] < 0.001):
raise RuntimeError('Could not determine a unique {} resolution'.format(coord_name))
return res[0]
nlat = lat.size
nlon = lon.size
if lat_res is None:
lat_res = calculate_resolution(lat, 'lat')
if lon_res is None:
lon_res = calculate_resolution(lon, 'lon')
lat_half_res = 0.5 * lat_res
area = np.zeros([nlat, nlon])
for j in range(nlat):
Slat = lat[j]-lat_half_res
Nlat = lat[j]+lat_half_res
Slat = np.deg2rad(Slat)
Nlat = np.deg2rad(Nlat)
for i in range(nlon):
area[j, i] = np.deg2rad(lon_res)*np.abs(np.sin(Slat)-np.sin(Nlat))
if abs(np.sum(area) - 4*np.pi) > 0.0001: # ensure proper normalization so the total area of Earth is 4*pi
if not muted:
print('Total earth area is {:g} not 4pi (difference of {:g}), normalizing to 4pi.'
.format(np.sum(area), np.sum(area) - 4*np.pi))
area *= 4*np.pi/np.sum(area)
return area
def calculate_eq_lat_on_grid(EPV, PT, area):
"""
Calculate equivalent latitude on a 4D grid.
:param EPV: the potential vorticity on the 4D grid.
:type EPV: :class:`numpy.ndarray`
:param PT: the potential temperature on the 4D grid.
:type PT: :class:`numpy.ndarray`
:param area: the 2D grid of surface area (in steradians) that corresponds to the 2D slices of the 4D grid.
:type area: :class:`numpy.ndarray`
:return: equivalent latitude
:rtype: :class:`numpy.ndarray`
"""
EL = np.full_like(PT, np.nan)
for itime in range(PT.shape[0]):
interpolator = calculate_eq_lat(EPV[itime], PT[itime], area)
# This is probably going to be horrifically slow - but interp2d sometimes gives weird results when called with
# vectors, so unfortunately we have to call this one element at a time
pbar = ProgressBar(PT[itime].size, prefix='Calculating eq. lat for time {}/{}:'.format(itime, PT.shape[0]),
style='counter')
for i in range(PT[itime].size):
pbar.print_bar(i)
ilev, ilat, ilon = np.unravel_index(i, PT[itime].shape)
this_pt = PT[itime, ilev, ilat, ilon]
this_epv = EPV[itime, ilev, ilat, ilon]
EL[itime, ilev, ilat, ilon] = interpolator(this_epv, this_pt)[0]
return EL
def calculate_eq_lat(EPV, PT, area):
"""
Construct an interpolator for equivalent latitude.
:param EPV: a 3D grid of potential vorticity
:type EPV: :class:`numpy.ndarray`
:param PT: a 3D grid of potential temperature
:type PT: :class:`numpy.ndarray`
:param area: the 2D grid of surface area (in steradians) that corresponds to the 2D slices of the 4D grid.
:type area: :class:`numpy.ndarray`
:return: a 2D interpolator for equivalent latitude, requires potential vorticity and potential temperature as inputs
:rtype: :class:`scipy.interpolate.interp2d`
Note: when querying the interpolator for equivalent latitude, it is often best to call it with scalar values, even
though that is slower than calling it with the full vector of PV and PT that you wish to get EL for. The problem is
that scipy 2D interpolators, when given vectors as input, return a grid. This would be fine, except that the values
corresponding to the vector of PV and PT are not always along the diagonal and so cannot be extracted with
:func:`numpy.diag`. (I suspect what is happening is that the interpolator sorts the input values when constructing
the grid, but I have not tested this. -JLL)
"""
nlev, nlat, nlon = PT.shape
# Get rid of fill values, this fills the bottom of profiles with the first valid value
PT[PT > 1e4] = np.nan
EPV[EPV > 1e8] = np.nan
for i in range(nlat):
pd.DataFrame(PT[:, i, :]).fillna(method='bfill', axis=0, inplace=True)
pd.DataFrame(EPV[:, i, :]).fillna(method='bfill', axis=0, inplace=True)
# Define a fixed potential temperature grid, with increasing spacing
# this is done arbitrarily to get sufficient levels for the interpolation to work well, and not too much for the
# computations to take less time
if np.min(PT) > 300 or np.max(PT) < 1000:
raise ValueError('Potential temperature range is smaller than the [300, 1000] K assumed to create the '
'interpolation grid')
theta_grid = _construct_grid((round_to_zero(np.nanmin(PT)), 300.0, 2), (300.0, 350.0, 5.0), (350.0, 500.0, 10.0),
(500.0, 750.0, 20.0), (750.0, 1000.0, 30.0), (1000.0, round_to_zero(np.nanmax(PT)), 100.0))
new_nlev = np.size(theta_grid)
# Get PV on the fixed PT levels ~ 2 seconds per date
new_EPV = np.zeros([new_nlev, nlat, nlon])
for i in range(nlat):
for j in range(nlon):
new_EPV[:, i, j] = np.interp(theta_grid, PT[:, i, j], EPV[:, i, j])
# Compute equivalent latitudes
EL = np.zeros([new_nlev, 100])
EPV_thresh = np.zeros([new_nlev, 100])
for k in range(new_nlev): # loop over potential temperature levels
maxPV = np.max(new_EPV[k]) # global max PV
minPV = np.min(new_EPV[k]) # global min PV
# define 100 PV values between the min and max PV
EPV_thresh[k] = np.linspace(minPV,maxPV,100)
for l,thresh in enumerate(EPV_thresh[k]):
area_total = np.sum(area[new_EPV[k]>=thresh])
EL[k,l] = np.arcsin(1-area_total/(2*np.pi))*90.0*2/np.pi
# Define a fixed potential vorticity grid, with increasing spacing away from 0
# The last term should ensure that 0 is in the grid
pv_grid = _construct_grid((round_to_zero(np.nanmin(EPV_thresh-50.0)), -1000.0, 50.0), (-1000.0, -500.0, 20.0), (-500.0, -100.0, 10.0),
(-100.0, -10.0, 1.0), (-10.0, -1.0, 0.1), (-1.0, 1.0, 0.01), (1.0, 10.0, 0.1),
(10.0, 100.0, 1.0), (100.0, 500.0, 10.0), (500.0, 1000.0, 20.0),
(1000.0, round_to_zero(np.nanmax(EPV_thresh))+50.0, 50.0), (0.0, 0.1))
# Generate interpolating function to get EL for a given PV and PT
interp_EL = np.zeros([new_nlev,len(pv_grid)])
for k in range(new_nlev):
interp_EL[k] = np.interp(pv_grid,EPV_thresh[k],EL[k])
return interp2d(pv_grid, theta_grid, interp_EL)
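# Illustrative note (added for exposition): the equivalent latitude assigned to a PV threshold is the
# latitude of the polar cap whose area equals the area enclosed by that PV contour. For an enclosed
# area of A steradians the conversion used above is EL = arcsin(1 - A/(2*pi)) in degrees, e.g.:
# >>> A = 2 * np.pi * (1 - np.sin(np.deg2rad(60.0)))       # area of the cap poleward of 60 N
# >>> np.arcsin(1 - A / (2 * np.pi)) * 90.0 * 2 / np.pi    # recovers ~60 degrees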
def get_eqlat_profile(interpolator, epv, theta):
el = np.full_like(epv, np.nan)
for i, (pv, pt) in enumerate(zip(epv, theta)):
el[i] = interpolator(pv, pt)
return el
def _format_geosfp_name(product, file_type, levels, date_time, add_subdir=False):
"""
Create the file name for a GEOS FP or FP-IT file.
:param product: which GEOS product ('fp' or 'fpit') to use
:param file_type: which file type ('met' for meteorology, 'chm' for chemistry) to use
:type file_type: str
:param levels: which levels ('surf', 'p', or 'eta') to use
:type levels: str
:param date_time: the date and time of the desired file. The hour should be a multiple of 3.
:type date_time: datetime-like
:param add_subdir: if ``True``, then the correct subdirectory will be prepended.
:type add_subdir: bool
:return: the file name
:rtype: str
"""
product_patterns = {'fp': 'GEOS.fp.asm.inst3_{dim}d_{vars}_{type}.{date_time}.V01.nc4',
'fpit': 'GEOS.fpit.asm.inst3_{dim}d_{vars}_{type}.GEOS5124.{date_time}.V01.nc4'}
level_mapping = {'surf': 'Nx', 'p': 'Np', 'eta': 'Nv'}
level_dims = {'Np': 3, 'Nx': 2, 'Nv': 3}
var_types = {'met': 'asm', 'chm': 'chm'}
try:
pattern = product_patterns[product]
except KeyError:
raise ValueError('product "{}" has not been defined. Allowed values are: {}'
.format(product, ', '.join(product_patterns.keys())))
try:
levels = level_mapping[levels]
except KeyError:
raise ValueError('levels "{}" not recognized. Allowed values are: {}'
.format(levels, ', '.join(level_mapping.keys())))
try:
dims = level_dims[levels]
except KeyError:
raise ValueError('levels "{}" does not have a defined number of dimensions. Allowed values are: {}'
.format(levels, ', '.join(level_dims.keys())))
date_time = date_time.strftime('%Y%m%d_%H%M')
fname = pattern.format(dim=dims, type=levels, date_time=date_time, vars=var_types[file_type])
if add_subdir:
fname = os.path.join(levels, fname)
return fname
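# Example (illustrative; the date is assumed): an FP-IT 3D met file name for 00:00 UTC on 1 Jan 2018.
# >>> _format_geosfp_name('fpit', 'met', 'p', dt.datetime(2018, 1, 1, 0))
# 'GEOS.fpit.asm.inst3_3d_asm_Np.GEOS5124.20180101_0000.V01.nc4'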
def read_geos_files(start_date, end_date, geos_path, profile_variables, surface_variables, product='fpit',
keep_time_dim=True, concatenate_arrays=False, set_mask_to_nan=False):
"""
Read GEOS FP or FP-IT files between specified dates.
:param start_date: the first date to read GEOS files from
:type start_date: datetime-like
:param end_date: the last date to read GEOS file from (exclusive). Note that if this datetime is not exactly a time
that GEOS files are produced on, you will lose an extra file. For example, since GEOS files are produced every 3
hours, if you specify ``end_date = datetime.datetime(2012, 1, 1, 23)``, you will lose the file from
21:00 UTC 1 Jan 2012.
:type end_date: datetime-like
:param geos_path: the path where the GEOS files are stored. Must have subdirectories 'Np' and 'Nx' for the profile
and surface files, respectively. Currently, each of these subdirectories must be flat, meaning that all GEOS data
from all times is stored at the top level, not organized into further subdirectories by year/month etc.
:type geos_path: str
:param profile_variables: a list of variables to read from the profile (Np) files. 'lon', 'lat', and 'lev' are
always read.
:type profile_variables: list(str)
:param surface_variables: a list of variables to read from the surface (Nx) files. 'lon' and 'lat' are always read.
:type surface_variables: list(str)
:param product: one of the strings 'fp' or 'fpit', determining which GEOS product is being read.
:type product: str
:param keep_time_dim: Set to ``True`` to keep the time dimension of the variables. This means the profile and
surface variables will be 4D and 3D respectively. Set to ``False`` to remove it (so they will be 3D and 2D).
:type keep_time_dim: bool
:param concatenate_arrays: Set to ``True`` to concatenate the data from different files into a single array. This
requires ``keep_time_dim`` to be ``True`` since they are concatenated along the time dimension. If ``False``, then
the variables are left as lists of numpy arrays, where each array comes from a separate file.
:type concatenate_arrays: bool
:param set_mask_to_nan: Set to ``True`` to convert the masked arrays that are read from netCDF files by default into
regular numpy arrays, with the masked values replaced by NaNs.
:type set_mask_to_nan: bool
:return: dictionaries of profile and surface variables, and a Pandas DateTimeIndex of the file dates. The variable
dictionaries' values' format depends on the value of ``concatenate_arrays``.
:rtype: dict, dict, DatetimeIndex
"""
def read_var_helper(nchandle, varname, keep_time=keep_time_dim):
if keep_time:
data = nchandle.variables[varname][:]
else:
# This is equivalent to doing nchandle.variables[varname][0,:,:,:] for a 4D variable; omitting the trailing
# colons makes it general for any size array.
data = nchandle.variables[varname][0]
if set_mask_to_nan:
data = data.filled(np.nan)
return data
def read_files_helper(file_list, variables, is_profile):
var_data = {v: [] for v in variables}
for fidx, fname in enumerate(file_list):
with ncdf.Dataset(fname, 'r') as nchandle:
# Always read lat/lon. If reading a profile file, get the levels too for the vertical coordinate.
lon = read_var_helper(nchandle, 'lon', keep_time=True)
lat = read_var_helper(nchandle, 'lat', keep_time=True)
if is_profile:
lev = read_var_helper(nchandle, 'lev', keep_time=True)
# If on the first file, store the coordinate variables. If on a later file, double check that the
# coordinates are the same. They should be, and that assumption makes the data easier to work with
# since we don't have to recheck our indices for each file.
if fidx == 0:
var_data['lon'] = lon
var_data['lat'] = lat
if is_profile:
var_data['lev'] = lev
else:
chk = not ma.allclose(var_data['lon'], lon) or not ma.allclose(var_data['lat'], lat)
if is_profile:
chk = chk or not ma.allclose(var_data['lev'], lev)
if chk:
# TODO: replace this with proper GEOS error from the backend analysis
raise RuntimeError('lat, lon, and/or lev are inconsistent among the GEOS files')
for var in variables:
var_data[var].append(read_var_helper(nchandle, var))
if concatenate_arrays:
for var, data in var_data.items():
if isinstance(data, list):
# The lon/lat/lev variables don't need to be concatenated; we checked that they don't change with time
# so there's only one array for them, not a list.
var_data[var] = concatenate(data, axis=0)
return var_data
# input checking - we concatenate along the time dimension, so we better keep it
if concatenate_arrays and not keep_time_dim:
raise ValueError('concatenate_arrays = True requires keep_time_dim = True')
# If we're converting the default masked arrays to regular arrays, we have to use np.concatenate because
# ma.concatenate always returns a masked array. If we're not converting, then the reverse applies.
if set_mask_to_nan:
concatenate = np.concatenate
else:
concatenate = ma.concatenate
geos_prof_files, file_dates = geosfp_file_names(product, 'met', 'p', start_date, end_date)
geos_surf_files, surf_file_dates = geosfp_file_names(product, 'met', 'surf', start_date, end_date)
# Check that the file lists have the same dates
if len(file_dates) != len(surf_file_dates) or any(file_dates[i] != surf_file_dates[i] for i in range(len(file_dates))):
raise RuntimeError('Somehow listed different profile and surface files')
elif concatenate_arrays:
file_dates = pd.DatetimeIndex(file_dates)
geos_prof_files = [os.path.join(geos_path, 'Np', f) for f in geos_prof_files]
geos_surf_files = [os.path.join(geos_path, 'Nx', f) for f in geos_surf_files]
prof_data = read_files_helper(geos_prof_files, profile_variables, is_profile=True)
surf_data = read_files_helper(geos_surf_files, surface_variables, is_profile=False)
return prof_data, surf_data, file_dates
def geosfp_file_names(product, file_type, levels, start_date, end_date=None):
"""
List all file names for GEOS FP or FP-IT files for the given date(s).
:param product: which GEOS product ('fp' or 'fpit') to use
:type product: str
:param file_type: which file type ('met' for meteorology, 'chm' for chemistry) to use
:type file_type: str
:param levels: which levels ('surf', 'p', or 'eta') to use
:type levels: str
:param start_date: what date to start listing files for. If ``end_date`` is omitted, only the file for this date
will be listed. Note that the hour must be a multiple of 3, since GEOS files are produced every three hours.
:type start_date: datetime-like
:param end_date: what date to stop listing files. This is exclusive, so the end date itself will not be included. Can
be omitted to just list one file.
:type end_date: None or datetime-like
:return: the list of file names and an array of file dates
:rtype: list, :class:`pandas.DatetimeIndex`
"""
freq = pd.Timedelta(hours=3)
if start_date.hour % 3 != 0:
raise ValueError('The hour of start_date must be a multiple of 3')
if end_date is None:
end_date = start_date + freq
geos_file_dates = pd.date_range(start=start_date, end=end_date - freq, freq=freq)
geos_file_names = []
for date in geos_file_dates:
this_name = _format_geosfp_name(product, file_type, levels, date)
geos_file_names.append(this_name)
return geos_file_names, geos_file_dates
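# Example (illustrative; dates are assumed): listing the eight 3-hourly FP-IT profile files for 1 Jan 2018.
# >>> names, dates = geosfp_file_names('fpit', 'met', 'p', dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 2))
# >>> len(names)
# 8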
def geosfp_file_names_by_day(product, file_type, levels, utc_dates, utc_hours=None, add_subdir=False):
"""
Create a list of GEOS-FP file names for specified dates
This differs from :func:`geosfp_file_names` because this function can list files for only specific hours across
multiple days. For example, if you want only 00:00 UTC FP-IT profile files for all of 2018, you would call this as::
geosfp_file_names_by_day('fpit', 'met', 'p', pd.date_range('2018-01-01', '2018-12-31'), utc_hours=[0])
:param product: which GEOS-FP product to make names for: "fp" or "fpit"
:type product: str
:param file_type: which file type ('met' for meteorology, 'chm' for chemistry) to use
:type file_type: str
:param levels: which levels ('surf', 'p', or 'eta') to use
:type levels: str
:param utc_dates: Dates (on UTC time) to read files for.
:type utc_dates: collection(datetime) or collection(datetime-like objects)
:param utc_hours: Which hours of the day to use (in UTC). If ``None``, then all hours that GEOS is produced on are
used (every 3 hours). Otherwise, pass a collection of integers to specify a subset of hours to use. (e.g.
[0, 3, 6, 9] to only use the files from the first half of each day).
:type utc_hours: None or collection(int)
:param add_subdir: if ``True``, then the correct subdirectory will be prepended.
:type add_subdir: bool
:return: a list of GEOS file names and a list of the corresponding datetimes
:rtype: list(str), list(:class:`datetime.datetime`)
"""
geos_utc_hours = np.arange(0, 24, 3)
if utc_hours is not None:
geos_utc_hours = geos_utc_hours[np.isin(geos_utc_hours, utc_hours)]
geos_file_names = []
geos_file_dates = []
for date in utc_dates:
for hr in geos_utc_hours:
date_time = dt.datetime(date.year, date.month, date.day, hr)
this_name = _format_geosfp_name(product, file_type, levels, date_time, add_subdir=add_subdir)
geos_file_names.append(this_name)
geos_file_dates.append(date_time)
return geos_file_names, geos_file_dates
def datetime_from_geos_filename(geos_filename):
geos_filename = os.path.basename(geos_filename)
date_str = re.search(r'\d{8}_\d{4}', geos_filename).group()
return dt.datetime.strptime(date_str, '%Y%m%d_%H%M')
def is_geos_on_native_grid(geos_filename):
with ncdf.Dataset(geos_filename, 'r') as nch:
return nch['lev'].size == 72
def mod_interpolation_legacy(z_grid, z_met, t_met, val_met, interp_mode=1, met_alt_geopotential=True):
"""
Legacy interpolation for .mod file profiles onto the TCCON grid
:param z_grid: the altitude levels (in kilometers) to interpolate the values onto
:type z_grid: :class:`numpy.ndarray`
:param z_met: the altitude levels (in kilometers) of the input values
:type z_met: :class:`numpy.ndarray`
:param t_met: the absolute temperature (in Kelvin) on the same levels as the input values
:type t_met: :class:`numpy.ndarray`
:param val_met: the input values to be interpolated to the ``z_grid`` levels
:type val_met: :class:`numpy.ndarray`
:param interp_mode: how to do the interpolation. Mode ``1`` (default) is used in the original GGG code for water
vapor dry-air mole fraction. Recommended mode for anything relating to a concentration. Mode ``0`` is for
temperature only. Mode ``2`` is for pressure, or more generally, values with an exponential dependence on altitude.
:type interp_mode: int
:param met_alt_geopotential: if ``True``, the met altitudes are assumed to be geopotential heights rather than
geometric ones. In that case the grid altitudes are slightly modified internally to be compatible. If ``False``, then
the met altitudes are assumed to be geometric, and the grid altitudes will not be scaled.
:type met_alt_geopotential: bool
:return: the input values, interpolated onto the ``z_grid`` altitudes.
:rtype: :class:`numpy.ndarray`
"""
if met_alt_geopotential:
z_grid = z_grid / (1 + z_grid/earth_radius)
val_grid = np.full(z_grid.shape, np.nan, dtype=val_met.dtype)
for i, z in enumerate(z_grid):
# Find the levels in the met data above and below the current grid level. If we're below the first met level,
# use the first two levels to extrapolate
i_below = np.argwhere(z_met < z)
if i_below.size == 0:
i_below = 0
else:
i_below = np.max(i_below)
i_above = i_below + 1
# Calculate beta, which is used as the interpolation factor
lapse_rate = (t_met[i_above] - t_met[i_below]) / (z_met[i_above] - z_met[i_below])
if lapse_rate * (t_met[i_above] - t_met[i_below]) > 0.01 * t_met[i_below]:
beta = np.log(1 + lapse_rate * (z - z_met[i_below])/t_met[i_below]) / np.log(1 + lapse_rate*(z_met[i_above] - z_met[i_below]) / t_met[i_below])
else:
beta = (z - z_met[i_below]) / (z_met[i_above] - z_met[i_below])
# Different interpolation modes that come from the GGG2014 fortran code.
if interp_mode == 0:
# interp_mode = 0 is for temperature only, uses lapse rate directly
val_grid[i] = val_met[i_below] + lapse_rate * (z - z_met[i_below])
elif interp_mode == 1:
# interp_mode = 1 is for species concentrations
val_grid[i] = val_met[i_below] + beta * (val_met[i_above] - val_met[i_below])
elif interp_mode == 2:
# interp_mode = 2 is for pressure
val_grid[i] = val_met[i_below] * (val_met[i_above] / val_met[i_below])**beta
else:
raise ValueError('interp_mode = {} is not allowed. Must be 0, 1, or 2'.format(interp_mode))
return val_grid
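# Example (illustrative sketch; the profile values are assumed, not taken from GGG): interpolating a
# pressure profile onto intermediate altitudes with the exponential mode (interp_mode=2).
# >>> z_met = np.array([0.0, 2.0, 4.0])
# >>> t_met = np.array([288.0, 275.0, 262.0])
# >>> p_met = np.array([1000.0, 785.0, 608.0])
# >>> mod_interpolation_legacy(np.array([1.0, 3.0]), z_met, t_met, p_met, interp_mode=2,
# ...                          met_alt_geopotential=False)   # roughly [887, 692] hPa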
def mod_interpolation_new(z_grid, z_met, vals_met, interp_mode='linear'):
"""
New method to interpolate met data onto the TCCON prior altitude grid
:param z_grid: the altitude levels to interpolate to.
:type z_grid: :class:`numpy.ndarray`
:param z_met: the altitude levels in the meteorology data
:type z_met: :class:`numpy.ndarray`
:param vals_met: the values to be interpolated, on the ``z_met`` levels
:type vals_met: :class:`numpy.ndarray`
:param interp_mode: how to do the interpolation:
* ``'linear'`` will do linear interpolation of ``vals_met`` with respect to z.
* ``'lin-log'`` linearly interpolate ln(``vals_met``) with respect to z.
* ``'log-lin'`` linearly interpolated ``vals_met`` with respect to ln(z).
* ``'log-log'`` linearly interpolate ln(``vals_met``) with respect to ln(z).
For compatibility with `mod_interpolation_legacy`, ``interp_mode`` may also be an integer. ``0`` or ``1`` are
aliases for ``'linear'`` and ``2`` is the same as ``'lin-log'``
:type interp_mode: str or int
:return: the values interpolated to the TCCON grid.
:rtype: :class:`numpy.ndarray`
"""
interp_mode_compat_mapping = {0: 'linear', 1: 'linear', 2: 'lin-log', 3: 'log-lin', 4: 'log-log'}
err_msg = 'interp_mode = {} is invalid. It must be one of the strings "{}" or one of the integers {}'.format(
interp_mode, '", "'.join(interp_mode_compat_mapping.values()),
', '.join([str(k) for k in interp_mode_compat_mapping.keys()])
)
if isinstance(interp_mode, int):
try:
interp_mode = interp_mode_compat_mapping[interp_mode]
except KeyError:
raise ValueError(err_msg)
elif interp_mode not in interp_mode_compat_mapping.values():
raise ValueError(err_msg)
interp_mode = interp_mode.lower()
do_log_x = re.match(r'^log-\w{3}', interp_mode)
do_log_y = re.match(r'\w{3}-log$', interp_mode)
if do_log_x:
z_grid = np.log(z_grid)
z_met = np.log(z_met)
if do_log_y:
vals_met = np.log(vals_met)
vals_interp = interp1d(z_met, vals_met, fill_value='extrapolate')
vals_grid = vals_interp(z_grid)
# If doing logarithmic interpolation for the y variable need to restore the output values. (x not returned so no
# need to restore.)
if do_log_y:
vals_grid = np.exp(vals_grid)
return vals_grid
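# Example (illustrative; values assumed): pressure falls off roughly exponentially with altitude, so
# 'lin-log' interpolation (linear in ln(p) with respect to z) is a natural choice for it.
# >>> z_met = np.array([0.0, 5.0, 10.0])
# >>> p_met = np.array([1000.0, 540.0, 265.0])
# >>> mod_interpolation_new(np.array([2.5, 7.5]), z_met, p_met, interp_mode='lin-log')   # ~ [735, 378]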
def interp_tropopause_height_from_pressure(p_trop_met, p_met, z_met):
"""
Calculate the tropopause height by interpolating to the tropopause pressure
:param p_trop_met: the blended tropopause pressure from GEOS Nx files.
:type p_trop_met: float
:param p_met: the vector of pressure for this profile. Must be in the same units as ``p_trop_met``.
:type p_met: array-like
:param z_met: the vector of altitude levels for this profile.
:type z_met: array-like
:return: the tropopause altitude, in the same units as ``z_met``.
:rtype: float
"""
# The age-of-air calculation used for the tropospheric trace gas profile calculation needs the tropopause altitude.
# Previously we'd tried finding this by interpolating to the tropopause potential temperature, in order to be
# consistent about defining the strat/trop separation by potential temperature. However, potential temperature
# does not always behave in a manner that makes interpolating to it straightforward (e.g. it crosses the tropopause
# theta 0 or >1 times) so we just use pressure now.
z_trop_met = mod_interpolation_new(p_trop_met, p_met, z_met, 'log-lin')
if z_trop_met < np.nanmin(z_met):
raise RuntimeError('Tropopause altitude calculated to be below the bottom of the profile. Something has '
'gone horribly wrong.')
return z_trop_met
def calc_wmo_tropopause(temperature, altitude, limit_to=(5., 18.), raise_error=True):
"""
Find the tropopause altitude using the WMO definition
The WMO thermal definition of the tropopause is: "the level at which the lapse rate drops to < 2 K/km and the
average lapse rate between this and all higher levels within 2 km does not exceed 2 K/km".
(quoted in https://www.atmos-chem-phys.net/8/1483/2008/acp-8-1483-2008.pdf, sect. 2.4).
:param temperature: the temperature profile, in K
:type temperature: :class:`numpy.ndarray` (1D)
:param altitude: the altitude for each level in the temperature profile, in kilometers
:type altitude: :class:`numpy.ndarray` (1D)
:param limit_to: the range of altitudes to limit the search for the tropopause to. This both helps avoid erroneous
results and potentially speed up the analysis.
:type limit_to: tuple(float, float)
:param raise_error: If ``True``, this function raises an error if it cannot find the tropopause. If ``False``, it
returns a NaN in that case.
:type raise_error: bool
:return: the tropopause altitude in kilometers
:rtype: float
:raises TropopauseError: if ``raise_error`` is ``True`` and this cannot find the tropopause.
"""
# Calculate the lapse rate on the half levels. By definition, a positive lapse rate is a decrease with altitude, so
# we need the minus sign.
lapse = -np.diff(temperature) / np.diff(altitude)
alt_half = altitude[:-1] + np.diff(altitude)/2.0
# Cut down the data to just the relevant range of altitudes recommended by
# https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2003GL018240 (end of sect. 2)
zz = (alt_half >= np.min(limit_to)) & (alt_half <= np.max(limit_to))
lapse = lapse[zz]
alt_half = alt_half[zz]
# Iterate over the levels. If the lapse rate is < 2 K/km, check that it remains there over the next 2 kilometers
for k, (gamma, alt) in enumerate(zip(lapse, alt_half)):
if gamma < 2.0:
step = 0.1
test_alt = np.arange(alt, alt+2.0+step, step)
test_lapse = np.interp(test_alt, alt_half, lapse)
if np.all(test_lapse < 2.0):
# Interpolate to the exact tropopause altitude where the lapse rate first crosses the 2 K/km
# np.interp requires the x-coordinates to be sorted, hence the complicated formula to get k_inds
k_inds = np.argsort(lapse[[k-1, k]]) + k - 1
return np.interp(2.0, lapse[k_inds], alt_half[k_inds])
# If we get here, we failed to find the tropopause, so return a NaN or raise an error
if raise_error:
raise TropopauseError('Could not find a level meeting the WMO tropopause condition in the given profile')
else:
return np.nan
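# Example (illustrative synthetic profile): a constant 6.5 K/km lapse rate up to 12 km topped by an
# isothermal layer should place the WMO tropopause near 12 km.
# >>> alt = np.arange(0.0, 20.5, 0.5)
# >>> temp = 288.0 - 6.5 * np.minimum(alt, 12.0)
# >>> calc_wmo_tropopause(temp, alt)   # approximately 12 km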
def number_density_air(p, t):
"""
Calculate the ideal dry number density of air in molec. cm^-3
:param p: pressure in hPa
:type p: float or :class:`numpy.ndarray`
:param t: temperature in K
:type t: float or :class:`numpy.ndarray`
:return: ideal dry number density in molec. cm^-3
:rtype: float or :class:`numpy.ndarray`
"""
R = 8.314e4 # gas constant in cm^3 * hPa / (mol * K)
return p / (R*t) * 6.022e23
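# Example (illustrative): at typical surface conditions of 1013.25 hPa and 296 K this gives
# roughly 2.5e19 molec. cm^-3.
# >>> number_density_air(1013.25, 296.0)   # ~2.48e19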
def effective_vertical_path(z, p=None, t=None, nair=None):
"""
Calculate the effective vertical path used by GFIT for a given z/P/T grid.
:param z: altitudes of the vertical levels. May be any unit, but note that the effective paths will be returned in
the same unit.
:type z: array-like
:param p: pressures of the vertical levels. Must be in hPa.
:type p: array-like
:param t: temperatures of the vertical levels. Must be in K.
:type t: array-like
:param nair: the number density of air on the vertical levels, in molec. cm^-3. If given, it is used directly and
``p`` and ``t`` are not required; otherwise both ``p`` and ``t`` must be provided.
:type nair: array-like
:return: effective vertical paths in the same units as ``z``
:rtype: array-like
"""
def integral(dz_in, lrp_in, sign):
return dz_in * 0.5 * (1.0 + sign * lrp_in / 3 + lrp_in**2/12 + sign*lrp_in**3/60)
if nair is not None:
d = nair
elif p is not None and t is not None:
d = number_density_air(p, t)
else:
raise TypeError('Either nair or p & t must be given')
dz = np.concatenate([[0.0], np.diff(z), [0.0]])
log_rp = np.log(d[:-1] / d[1:])
log_rp = np.concatenate([[0.0], log_rp, [0.0]])
# from gfit/compute_vertical_paths.f, the calculation for level i is
# v_i = 0.5 * dz_{i+1} * (1 - l_{i+1}/3 + l_{i+1}**2/12 - l_{i+1}**3/60)
# + 0.5 * dz_i * (1 + l_i/3 + l_i**2/12 + l_i**3/60)
# where
# dz_i = z_i - z_{i-1}
# l_i = ln(d_{i-1}/d_i)
# The top level has no i+1 term. This vector addition duplicates that calculation. The zeros padded to the beginning
# and end of the difference vectors ensure that when there's no i+1 or i-1 term, it is given a value of 0.
vpath = integral(dz[1:], log_rp[1:], sign=-1) + integral(dz[:-1], log_rp[:-1], sign=1)
# TODO: handle the levels around the surface
return vpath
def get_ussa_for_alts(alts):
"""
Get temperature and pressure from the US standard atmosphere (USSA) for given altitudes.
Temperature is interpolated to the requested altitudes linearly, assuming that the lapse rate is constant between
the levels defined by the USSA. Pressure is interpolated exponentially, i.e. ln(p) is interpolated linearly.
:param alts: altitudes, in kilometers, to calculate T and P for.
:type alts: float or :class:`numpy.ndarray`
:return: temperatures (in K) and pressures (in hPa). Arrays will be the same shape as the input altitudes.
:rtype: float or :class:`numpy.ndarray`, float or :class:`numpy.ndarray`
"""
# Need to interpolate pressure and temperature to the given altitudes. Will assume that temperature varies linearly
# with altitude and pressure varies exponentially. Since p = p0 * exp(-z/H) then ln(p) = ln(p0) - z/H, therefore
# we will linearly interpolate ln(p) w.r.t. altitude.
z_coord = const.z_ussa
t_coord = const.t_ussa
p_coord = np.log(const.p_ussa)
interp_args = {'left': np.nan, 'right': np.nan}
t = np.interp(alts, z_coord, t_coord, **interp_args)
p = np.exp(np.interp(alts, z_coord, p_coord, **interp_args))
return t, p
def get_ussa_for_pres(pres):
"""
Get altitude and temperature from the US standard atmosphere (USSA) for given pressures.
Temperature is interpolated to the requested altitudes linearly, assuming that the lapse rate is constant between
the levels defined by the USSA. Pressure is interpolated exponentially, i.e. ln(p) is interpolated linearly.
:param pres: pressures, in hPa, to calculate z and T for.
:type pres: float or :class:`numpy.ndarray`
:return: temperatures (in K) and altitudes (in km). Arrays will be the same shape as the input altitudes.
:rtype: float or :class:`numpy.ndarray`, float or :class:`numpy.ndarray`
"""
# Since temperature varies linearly with altitude and altitude varies linearly vs. ln(p), interpolate both by the
# log of pressure
z_coord = np.flipud(const.z_ussa)
t_coord = np.flipud(const.t_ussa)
# must flip - np.interp expects its x-coordinates to be increasing.
p_coord = np.flipud(np.log(const.p_ussa))
pres = np.log(pres)
interp_args = {'left': np.nan, 'right': np.nan}
t = np.interp(pres, p_coord, t_coord, **interp_args)
z = np.interp(pres, p_coord, z_coord, **interp_args)
return t, z
def age_of_air(lat, z, ztrop, ref_lat=45.0):
"""
Calculate age of air using a function form from GGG 2014.
:param lat: the latitude(s) to calculate age of air for
:type lat: float or :class:`numpy.ndarray`
:param z: the altitude(s) to calculate age of air for. If both ``z`` and ``lat`` given as a vectors, they must be
the same shape. Must have units of kilometers.
:type z: float or :class:`numpy.ndarray`
:param ztrop: the tropopause altitude, in kilometers.
:type ztrop: float
:param ref_lat: the reference latitude for the cycle. This is where the exponential in latitude is maximized. 45N
was chosen as the default as the center of the northern hemisphere, where most anthropogenic emissions are.
:type ref_lat: float
:return: age of air, in years, as a numpy array
:rtype: :class:`numpy.ndarray`
"""
# Force z to be a numpy array. This allows us to use numpy indexing for the extra (stratospheric) term below and
# simultaneously ensures aoa is always a numpy array.
if not isinstance(z, np.ndarray):
z = np.array([z])
fl = lat/22.0
aoa = 0.313 - 0.085 * np.exp(-((lat-ref_lat)/18)**2) - 0.268*np.exp(-1.42 * z / (z+ztrop)) * fl / np.sqrt(1+fl**2)
# We limit the calculation to z > ztrop here because that avoids a divide-by-0 warning
# This term is really only kept in for completeness; in practice, it should never be used because we don't use
# this term in the stratosphere.
extra_term = 7.0 * (z[z > ztrop]-ztrop)/z[z > ztrop]
aoa[z > ztrop] += extra_term
return aoa
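# Example (illustrative; inputs assumed): tropospheric age of air at 45 N for a 12 km tropopause.
# Ages below the tropopause at the reference latitude stay small (well under a year).
# >>> age_of_air(45.0, np.array([0.0, 5.0, 10.0]), ztrop=12.0)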
def seasonal_cycle_factor(lat, z, ztrop, fyr, species, ref_lat=45.0):
"""
Calculate a factor to multiply a concentration by to account for the seasonal cycle.
:param lat: the latitude(s) to calculate age of air for
:type lat: float or :class:`numpy.ndarray`
:param z: the altitude(s) to calculate age of air for. If both ``z`` and ``lat`` given as a vectors, they must be
the same shape. Must have units of kilometers.
:type z: float or :class:`numpy.ndarray`
:param ztrop: the tropopause altitude, in kilometers.
:type ztrop: float
:param fyr: the fraction of the year that corresponds to this date. You can convert a date time to this value with
:func:`date_to_frac_year`.
:type fyr: float
:param species: a child class of :class:`~tccon_priors.TraceGasTropicsRecord` that defines a gas name and seasonal
cycle coefficient. May be an instance of the class or the class itself. If the gas name is "co2", then a
CO2-specific parameterization is used.
:type species: :class:`~tccon_priors.TraceGasTropicsRecord`
:param ref_lat: reference latitude for the age of air. Set to 45N as an approximation of where the NH emissions are.
:type ref_lat: float
:return: the seasonal cycle factor as a numpy array. Multiply this by a deseasonalized concentration at (lat, z) to
get the concentration including the seasonal cycle
"""
if species.gas_seas_cyc_coeff is None:
raise TypeError('The species record ({}) does not define a seasonal cycle coefficient'.format(species.gas_name))
aoa = age_of_air(lat, z, ztrop, ref_lat=ref_lat)
if species.gas_name.lower() == 'co2':
sv = np.sin(2*np.pi *(fyr - 0.834 - aoa))
svnl = sv + 1.80 * np.exp(-((lat - 74)/41)**2)*(0.5 - sv**2)
sca = svnl * np.exp(-aoa/0.20)*(1 + 1.33*np.exp(-((lat-76)/48)**2) * (z+6)/(z+1.4))
else:
sv = np.sin(2*np.pi * (fyr - 0.78)) # basic seasonal variation
svl = sv * (lat / 15.0) / np.sqrt(1 + (lat / 15.0)**2.0) # latitude dependence
sca = svl * np.exp(-aoa / 0.85) # altitude dependence
return 1 + sca * species.gas_seas_cyc_coeff
def hf_ch4_slope_fit(yrs, a, b, c, t0):
"""
A fitting function appropriate to fit the trend of CH4 vs. HF slopes
This function has the form:
..math::
a * exp(b*(t - t0)) + c
where t is given in years.
:param yrs: t in the above equation.
:type yrs: :class:`numpy.ndarray`
:param a, b, c, t0: the fitting parameters in the above equation
:type a, b, c, t0: float
:return: the predicted slopes at ``yrs``
"""
return a * np.exp(b*(yrs - t0)) + c
# from https://stackoverflow.com/a/16562028
def isoutlier(data, m=2):
d = np.abs(data - np.nanmedian(data))
mdev = np.nanmedian(d)
s = d / mdev if mdev else 0.
return s >= m
def is_tropics(lat, doy, ages):
return np.abs(lat) < 20.0
def is_vortex(lat, doy, ages):
if not isinstance(doy, np.ndarray):
doy = np.full_like(lat, doy)
xx_vortex = np.zeros_like(lat, dtype=np.bool_)
xx_vortex[(doy > 140) & (doy < 245) & (lat < -55.0) & (ages > 3.25)] = True
xx_vortex[((doy > 275) | (doy < 60)) & (lat > 55.0) & (ages > 3.25)] = True
return xx_vortex
def is_midlat(lat, doy, ages):
return ~is_tropics(lat, doy, ages) & ~is_vortex(lat, doy, ages)
def is_overworld(potential_temp, pressure, trop_pres):
return (potential_temp >= 380) & (pressure <= trop_pres)
def npdate_to_datetime(numpy_date):
numpy_date = numpy_date.astype('datetime64[s]')
ts = (numpy_date - np.datetime64('1970-01-01T00:00:00Z', 's')) / np.timedelta64(1, 's')
return dt.datetime.utcfromtimestamp(ts)
def date_to_decimal_year(date_in):
"""
Convert a datetime object to a decimal year.
A decimal year is e.g. 2018.5, where the part before the decimal is the year itself and after the decimal is how
much of the year has passed.
:param date_in: the datetime object to convert
:type date_in: datetime-like
:return: the decimal year
:rtype: float
"""
if date_in is not None:
return date_in.year + date_to_frac_year(date_in)
else:
return np.nan
def day_of_year(date_in):
return clams_day_of_year(date_in) - 1
def clams_day_of_year(date_in):
return float(date_in.strftime('%j'))
def date_to_frac_year(date_in):
"""
Convert a datetime object to a fraction of a year.
The fraction is essentially how much of the year has passed, so June 1st (of any year) becomes ~0.416.
Note: this function assumes 365.25 days per year. This is an imperfect solution; ideally a fractional year should
describe the fraction of an orbit the Earth has completed, so only use this for things where an error of +/- a day
is insignificant.
:param date_in: the datetime object to convert
:type date_in: datetime-like
:return: the fractional year
:rtype: float
"""
return day_of_year(date_in) / days_per_year # since there's about an extra quarter of a day per year that gives us leap years
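# Example (illustrative): 2 July 2018 is day-of-year 183, so just under half of the year has passed.
# >>> date_to_frac_year(dt.datetime(2018, 7, 2))   # ~0.498, i.e. 182 / 365.25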
def frac_year_to_doy(yr_in):
"""
Convert a fractional year to a day of year.
Internally, this multiplies by 365.25, so see the warning in the docstring for :func:`date_to_frac_year` about
its precision. Crucially, this is NOT a reliable inverse operation to :func:`date_to_frac_year`.
:param yr_in: the fractional year to convert
:type yr_in: float
:return: the number of days since 1 Jan
:rtype: float
"""
return yr_in * days_per_year
def frac_years_to_reldelta(frac_year, allow_nans=True):
"""
Convert a fractional year to a :class:`relativedelta` from dateutils.
Note: like the other fraction/decimal year functions, this assumes 365.25 days/year internally. Therefore, this
should function correctly as an inverse operation to date_to_frac_year when added back to Jan 1 of the year in
question.
:param frac_year: the fractional year(s) (e.g 2.5 for 2 and a half years) to convert
:type frac_year: float or a collection of floats
:param allow_nans: whether to permit NaNs in the decimal years. If ``True``, then NaNs will be retained in the
output list. If ``False``, an error is raised if NaNs are found in ``dec_year``.
:type allow_nans: bool
:return: a list of dateutils :class:`relativedelta` objects or a single :class:`relativedelta` if a scalar
``frac_year`` was given.
:rtype: :class:`relativedelta` or list(:class:`relativedelta`)
"""
if isinstance(frac_year, float):
return_scalar = True
frac_year = [frac_year]
else:
return_scalar = False
if not allow_nans and np.any(np.isnan(frac_year)):
raise ValueError('NaNs not permitted in frac_year. Either remove them, or set `allow_nans=True`')
age_years = np.floor(frac_year)
age_fracs = np.mod(frac_year, 1)
rdels = [relativedelta(years=y, days=days_per_year * d) if not (np.isnan(y) or np.isnan(d)) else np.nan for y, d in zip(age_years, age_fracs)]
if return_scalar:
rdels = rdels[0]
return rdels
def timedelta_to_frac_year(timedelta):
"""
Convert a concrete timedelta to fractional years
:param timedelta: the timedelta to convert
:type timedelta: :class:`datetime.timedelta`
:return: the time delta as a fraction of years, assuming 365.25 days per year.
:rtype: float
"""
return timedelta.total_seconds() / (days_per_year * 24 * 3600)
def decimal_year_to_date(dec_year, date_type=dt.datetime):
"""
Convert decimal year or years (e.g. 2018.5) to a datetime-like object
:param dec_year: the decimal year or years to convert. May be any kind of collection (if passing multiple values) so
long as it supports iteration and that iteration returns scalar values (i.e. a 2D numpy array will not work because
iteration returns rows).
:param date_type: what type to convert the decimal years in to. May be any time that can be called
``date_type(year, month, day)``.
:return: the converted dates in the type ``date_type``. If a single decimal date was passed in, then a single value
of type ``date_type`` is returned. If a collection was passed in, then the dates will be returned in a 1D numpy
array.
"""
if np.any(np.isnan(dec_year)):
raise NotImplementedError('NaNs in the input decimal years not implemented')
try:
dec_year[0]
except (TypeError, IndexError):
dec_year = [dec_year]
return_as_scalar = True
else:
return_as_scalar = False
years = np.array([int(d) for d in dec_year])
frac_yrs = np.array([d % 1 for d in dec_year])
dates = np.array([date_type(y, 1, 1) + frac_years_to_reldelta(fr, allow_nans=False) for y, fr in zip(years, frac_yrs)])
if return_as_scalar:
return dates[0]
else:
return dates
def start_of_month(date_in, out_type=dt.date):
"""
Get a date-like object corresponding to the beginning of the month of ``date_in``
:param date_in: Any date-like object that has attributes ``year`` and ``month``
:param out_type: A type whose constructor accepts the keyword arguments ``year``, ``month``, and ``day``.
:return: an instance of ``out_type`` set to day 1, 00:00:00 of the month of ``date_in``.
"""
return out_type(year=date_in.year, month=date_in.month, day=1)
def relativedelta2string(rdelta):
parts = ('years', 'months', 'days', 'hours', 'minutes', 'seconds')
time_parts = []
for p in parts:
tp = getattr(rdelta, p)
if tp > 0:
time_parts.append('{} {}'.format(tp, p))
return ', '.join(time_parts)
def gravity(gdlat,altit):
"""
copy/pasted from fortran routine comments
This is used to convert
Input Parameters:
gdlat GeoDetric Latitude (degrees)
altit Geometric Altitude (km)
Output Parameter:
gravity Effective Gravitational Acceleration (m/s2)
radius Radius of earth at gdlat
Computes the effective Earth gravity at a given latitude and altitude.
This is the sum of the gravitational and centripetal accelerations.
These are based on equation I.2.4-(17) in US Standard Atmosphere 1962
The Earth is assumed to be an oblate ellipsoid, with a ratio of the
major to minor axes = sqrt(1+con) where con=.006738
This eccentricity makes the Earth's gravitational field smaller at
the poles and larger at the equator than if the Earth were a sphere
of the same mass. (At the equator, more of the mass is directly
below, whereas at the poles more is off to the sides.) This effect
also makes the local mid-latitude gravity field not point towards
the center of mass.
The equation used in this subroutine agrees with the International
Gravitational Formula of 1967 (Helmert's equation) within 0.005%.
Interestingly, since the centripetal effect of the Earth's rotation
(-ve at equator, 0 at poles) has almost the opposite shape to the
second order gravitational field (+ve at equator, -ve at poles),
their sum is almost constant so that the surface gravity could be
approximated (.07%) by the simple expression g=0.99746*GM/radius^2,
the latitude variation coming entirely from the variation of surface
r with latitude. This simple equation is not used in this subroutine.
"""
d2r=3.14159265/180.0 # Conversion from degrees to radians
gm=3.9862216e+14 # Gravitational constant times Earth's Mass (m3/s2)
omega=7.292116E-05 # Earth's angular rotational velocity (radians/s)
con=0.006738 # (a/b)**2-1 where a & b are equatorial & polar radii
shc=1.6235e-03 # 2nd harmonic coefficient of Earth's gravity field
eqrad=6378178.0 # Equatorial Radius (meters)
gclat=arctan(tan(d2r*gdlat)/(1.0+con)) # radians
radius=1000.0*altit+eqrad/np.sqrt(1.0+con*sin(gclat)**2)
ff=(radius/eqrad)**2
hh=radius*omega**2
ge=gm/eqrad**2 # = gravity at Re
gravity=(ge*(1-shc*(3.0*sin(gclat)**2-1)/ff)/ff-hh*cos(gclat)**2)*(1+0.5*(sin(gclat)*cos(gclat)*(hh/ge+2.0*shc/ff**2))**2)
return gravity, radius
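# Example (illustrative): effective gravity at the surface at 45 N latitude is about 9.81 m/s2, and the
# corresponding Earth radius is about 6.37e6 m.
# >>> g, r = gravity(45.0, 0.0)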
def geopotential_height_to_altitude(gph, lat, alt):
"""
Convert a geopotential height in m^2 s^-2 to meters
:param gph: geopotential height in m^2 s^-2.
:type gph: float
:param lat: geographic latitude (in degrees, south is negative)
:type lat: float
:param alt: altitude of the TCCON site in kilometers. If set to 0, will use gravity at the surface for the given
latitude.
:type alt: float
:return: the geopotential height converted to meters
"""
gravity_at_site, _ = gravity(lat, alt)
return gph / gravity_at_site
def to_unix_time(datetime):
"""
Convert a datetime-like object into Unix time (seconds since midnight, 1 Jan 1970)
:param datetime: the datetime to convert. May be any type that can have a :class:`datetime.datetime` object
subtracted from it, and for which the subtraction has a method `total_seconds` that returns the time delta as
a number of seconds. Both :class:`datetime.datetime` and :class:`pandas.Timestamp` are examples.
:return: unix time
:rtype: float
"""
return (datetime - dt.datetime(1970, 1, 1)).total_seconds()
def from_unix_time(utime, out_type=dt.datetime):
"""
Convert a unix time into a datetime object.
:param utime: the unix time (seconds since midnight, 1 Jan 1970)
:type utime: float
:param out_type: optional, a type that represents a datetime and has an init method such that
``out_type(year, month, day)`` returns an object representing that time and that can be added to a
:class:`datetime.timedelta`.
:type out_type: type
:return: a datetime object of the type specified by ``out_type``.
"""
return out_type(1970, 1, 1) + dt.timedelta(seconds=utime)
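# Example (illustrative round trip):
# >>> to_unix_time(dt.datetime(2018, 1, 1))
# 1514764800.0
# >>> from_unix_time(1514764800.0)
# datetime.datetime(2018, 1, 1, 0, 0)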
def mod_file_name(prefix,date,time_step,site_lat,site_lon_180,ew,ns,mod_path,round_latlon=True,in_utc=True):
YYYYMMDD = date.strftime('%Y%m%d')
HHMM = date.strftime('%H%M')
if in_utc:
HHMM += 'Z'
if round_latlon:
site_lat = round(abs(site_lat))
site_lon = round(abs(site_lon_180))
latlon_precision = 0
else:
site_lat = abs(site_lat)
site_lon = abs(site_lon_180)
latlon_precision = 2
if time_step < timedelta(days=1):
mod_fmt = '{{prefix}}_{{ymd}}_{{hm}}_{{lat:0>2.{prec}f}}{{ns:>1}}_{{lon:0>3.{prec}f}}{{ew:>1}}.mod'.format(prec=latlon_precision)
else:
mod_fmt = '{{prefix}}_{{ymd}}_{{lat:0>2.{prec}f}}{{ns:>1}}_{{lon:0>3.{prec}f}}{{ew:>1}}.mod'.format(prec=latlon_precision)
mod_name = mod_fmt.format(prefix=prefix, ymd=YYYYMMDD, hm=HHMM, lat=site_lat, ns=ns, lon=site_lon, ew=ew)
return mod_name
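# Example (illustrative; the site coordinates are assumed): a 3-hourly .mod file name for a site at
# 45.94 N, 90.27 W, with the default lat/lon rounding.
# >>> mod_file_name('FPIT', dt.datetime(2018, 1, 1, 0), dt.timedelta(hours=3), 45.94, -90.27,
# ...               ew='W', ns='N', mod_path='')
# 'FPIT_20180101_0000Z_46N_090W.mod'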
def mod_file_name_for_priors(datetime, site_lat, site_lon_180, prefix='FPIT', **kwargs):
if site_lon_180 > 180:
site_lon_180 -= 360
ew = format_lon(site_lon_180)[-1]
ns = format_lat(site_lat)[-1]
return mod_file_name(prefix=prefix, date=datetime, time_step=dt.timedelta(hours=3), site_lat=site_lat,
site_lon_180=site_lon_180, ew=ew, ns=ns, mod_path='', **kwargs)
def parse_date_range(datestr):
def parse_date(datestr):
try:
date_out = dt.datetime.strptime(datestr, '%Y%m%d')
except ValueError:
date_out = dt.datetime.strptime(datestr, '%Y%m%d_%H')
return date_out
dates = datestr.split('-')
start_date = parse_date(dates[0])
if len(dates) > 1:
end_date = parse_date(dates[1])
else:
end_date = start_date + timedelta(days=1)
return start_date, end_date
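# Example (illustrative): a YYYYMMDD-YYYYMMDD range string; a single date would yield a one-day range.
# >>> parse_date_range('20180101-20180105')
# (datetime.datetime(2018, 1, 1, 0, 0), datetime.datetime(2018, 1, 5, 0, 0))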
def get_ggg_path(subdir, subdir_name):
gggpath = os.getenv('GGGPATH')
if gggpath is None:
raise GGGPathError('Could not find the GGGPATH environmental variable. Please specify an explicit {}.'.format(subdir_name))
full_subdir = os.path.join(gggpath, subdir)
if not os.path.isdir(full_subdir):
raise GGGPathError('Could not find default {} {}'.format(subdir_name, full_subdir))
return full_subdir
| null | ginput/common_utils/mod_utils.py | mod_utils.py | py | 94,269 | python | en | code | null | code-starcoder2 | 83 |
[
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "os.path.getmtime",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "os.path.getmtime",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "numpy.abs",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 392,
"usage_type": "attribute"
},
{
"api_name": "numpy.size",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "numpy.ndim",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 479,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndim",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "ggg_logging.logger.warning",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "ggg_logging.logger",
"line_number": 592,
"usage_type": "name"
},
{
"api_name": "numpy.shape",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "numpy.ndim",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 655,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 673,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 748,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 818,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 852,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 852,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 853,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 853,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 863,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 874,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 902,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 905,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 907,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 907,
"usage_type": "attribute"
},
{
"api_name": "os.path.samefile",
"line_number": 907,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 934,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 935,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 935,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 957,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 957,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 982,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 984,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 997,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 998,
"usage_type": "call"
},
{
"api_name": "numpy.rollaxis",
"line_number": 1001,
"usage_type": "call"
},
{
"api_name": "numpy.rollaxis",
"line_number": 1003,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1013,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 1015,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1015,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1032,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 1033,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1047,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 1053,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 1054,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 1056,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1056,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1056,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 1058,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 1058,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 1061,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 1061,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 1062,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 1062,
"usage_type": "call"
},
{
"api_name": "numpy.full_like",
"line_number": 1083,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1083,
"usage_type": "attribute"
},
{
"api_name": "numpy.unravel_index",
"line_number": 1093,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1126,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 1127,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 1129,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 1130,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 1135,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 1135,
"usage_type": "call"
},
{
"api_name": "numpy.nanmin",
"line_number": 1139,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 1140,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 1141,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1144,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 1147,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1150,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1151,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 1153,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 1154,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 1157,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 1160,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 1161,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 1161,
"usage_type": "attribute"
},
{
"api_name": "numpy.nanmin",
"line_number": 1165,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 1168,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1171,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 1173,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp2d",
"line_number": 1175,
"usage_type": "call"
},
{
"api_name": "numpy.full_like",
"line_number": 1179,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1179,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 1233,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1233,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 1293,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 1301,
"usage_type": "call"
},
{
"api_name": "numpy.ma.allclose",
"line_number": 1317,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 1317,
"usage_type": "name"
},
{
"api_name": "numpy.ma.allclose",
"line_number": 1319,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 1319,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 1344,
"usage_type": "attribute"
},
{
"api_name": "numpy.ma.concatenate",
"line_number": 1346,
"usage_type": "attribute"
},
{
"api_name": "numpy.ma",
"line_number": 1346,
"usage_type": "name"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 1355,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 1357,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1357,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 1358,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1358,
"usage_type": "attribute"
},
{
"api_name": "pandas.Timedelta",
"line_number": 1388,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 1394,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1435,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 1437,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 1443,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 1452,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1452,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 1453,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 1454,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 1454,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 1458,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 1494,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1494,
"usage_type": "attribute"
},
{
"api_name": "numpy.argwhere",
"line_number": 1498,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 1502,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1509,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 1570,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 1571,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1574,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1575,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1577,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 1579,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1585,
"usage_type": "call"
},
{
"api_name": "numpy.nanmin",
"line_number": 1612,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1647,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1648,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 1652,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 1652,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1660,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 1661,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 1662,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 1665,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 1666,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1672,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 1718,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1718,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1719,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1720,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1753,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1755,
"usage_type": "attribute"
},
{
"api_name": "numpy.interp",
"line_number": 1756,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1757,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 1757,
"usage_type": "call"
},
{
"api_name": "numpy.flipud",
"line_number": 1778,
"usage_type": "call"
},
{
"api_name": "numpy.flipud",
"line_number": 1779,
"usage_type": "call"
},
{
"api_name": "numpy.flipud",
"line_number": 1781,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1781,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1783,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 1785,
"usage_type": "attribute"
},
{
"api_name": "numpy.interp",
"line_number": 1786,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 1787,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 1815,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 1816,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1819,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 1819,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1863,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 1863,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 1864,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1865,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1867,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 1867,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 1868,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1869,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1893,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1898,
"usage_type": "call"
},
{
"api_name": "numpy.nanmedian",
"line_number": 1898,
"usage_type": "call"
},
{
"api_name": "numpy.nanmedian",
"line_number": 1899,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1905,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 1909,
"usage_type": "attribute"
},
{
"api_name": "numpy.full_like",
"line_number": 1910,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 1911,
"usage_type": "call"
},
{
"api_name": "numpy.bool_",
"line_number": 1911,
"usage_type": "attribute"
},
{
"api_name": "numpy.datetime64",
"line_number": 1927,
"usage_type": "call"
},
{
"api_name": "numpy.timedelta64",
"line_number": 1927,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 1928,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 1928,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 1947,
"usage_type": "attribute"
},
{
"api_name": "mod_constants.days_per_year",
"line_number": 1974,
"usage_type": "name"
},
{
"api_name": "mod_constants.days_per_year",
"line_number": 1990,
"usage_type": "name"
},
{
"api_name": "numpy.any",
"line_number": 2017,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 2017,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 2019,
"usage_type": "call"
},
{
"api_name": "numpy.mod",
"line_number": 2020,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 2021,
"usage_type": "call"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 2021,
"usage_type": "call"
},
{
"api_name": "mod_constants.days_per_year",
"line_number": 2021,
"usage_type": "name"
},
{
"api_name": "numpy.nan",
"line_number": 2021,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta.total_seconds",
"line_number": 2039,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 2039,
"usage_type": "name"
},
{
"api_name": "mod_constants.days_per_year",
"line_number": 2039,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 2042,
"usage_type": "attribute"
},
{
"api_name": "numpy.any",
"line_number": 2058,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 2058,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2069,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2070,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2071,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 2079,
"usage_type": "attribute"
},
{
"api_name": "numpy.core._multiarray_umath.arctan",
"line_number": 2145,
"usage_type": "call"
},
{
"api_name": "numpy.core._multiarray_umath.tan",
"line_number": 2145,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 2147,
"usage_type": "call"
},
{
"api_name": "numpy.core._multiarray_umath.sin",
"line_number": 2147,
"usage_type": "call"
},
{
"api_name": "numpy.core._multiarray_umath.sin",
"line_number": 2152,
"usage_type": "call"
},
{
"api_name": "numpy.core._multiarray_umath.cos",
"line_number": 2152,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 2188,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 2191,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 2205,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 2222,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 2236,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 2243,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 2243,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 2245,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 2245,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 2253,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 2259,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 2262,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2262,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 2263,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2263,
"usage_type": "attribute"
}
] |
428463715
|
import numpy as np
import matplotlib.pyplot as mp
train_x = [0.5,0.6,0.8,1.1,1.4]
train_y = [5.0,5.5,6.0,6.8,7.0]
# gradient descent
times = 1000
lrate = 0.01
w0, w1 = [1], [1]  # record the parameters after each gradient descent step
x, y = np.array(train_x), np.array(train_y)
for i in range(1, times + 1):
    # each step needs the corrections for w0 and w1, which are the
    # partial derivatives of the loss function with respect to w0 and w1
    d0 = (w0[-1] + w1[-1] * x - y).sum()
    d1 = ((w0[-1] + w1[-1] * x - y) * x).sum()
    w0.append(w0[-1] - lrate * d0)
    w1.append(w1[-1] - lrate * d1)
# plot the sample points
mp.figure('Linear Regression',facecolor='lightgray')
mp.title('Linear Regression')
mp.grid(linestyle=':')
mp.scatter(train_x,train_y,s=60,marker='o',c='orangered',label='Samples')
mp.legend()
mp.show()
| null |
aid1901/day1/demo3_lr线性回归.py
|
demo3_lr线性回归.py
|
py
| 637 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
}
] |
435257901
|
import os
import base64
import requests
import time
from io import BytesIO
from PIL import Image
class Captcha():
def __init__(self, driver, img_element):
self.driver = driver
self.img_element = img_element
def parse(self):
try:
img = self.driver.find_element_by_xpath(".//*[@id='{}']".format(self.img_element))
with open('captcha.png', 'wb') as file:
file.write(img.screenshot_as_png)
# logging.info("Save Image")
buffer = BytesIO()
image = Image.open('captcha.png')
image.save(buffer, format="PNG")
img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
data = {
"clientKey": os.getenv("ANTI_CAPTCHA_KEY"),
"task": {
"type": "ImageToTextTask",
"body": img_str,
"phrase":False,
"case": True,
"numeric": 2,
"math": 0,
"minLength": 4,
"maxLength": 4
}
}
r = requests.post("https://api.anti-captcha.com/createTask", json=data)
r.raise_for_status()
print(r.json())
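# errorId 2 means the task was not accepted, so retry; otherwise poll for the solution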
if r.json()['errorId'] == 2:
print(r.json())
self.parse()
else:
task_id = r.json()['taskId']
ret = ""
while True:
data = {
"clientKey": os.getenv("ANTI_CAPTCHA_KEY"),
'taskId': task_id
}
r = requests.post("https://api.anti-captcha.com/getTaskResult", json=data)
r.raise_for_status()
if r.json()['status'] == 'ready':
ret = r.json()['solution']['text']
break
print('trying')
# logging.info("tring")
time.sleep(5)
return ret
except:
raise AttributeError('captcha error')
| null |
captcha.py
|
captcha.py
|
py
| 2,112 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "io.BytesIO",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "base64.b64encode",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 59,
"usage_type": "call"
}
] |
150124130
|
#! -*- coding: utf-8 -*-
__author__ = 'Yan.zhe 2021.2.1'
# from script_collection.import_share import *
from start_the_service.app_instance import *
import typing
class JointFrameWizardFrameStore(AppStore):
"""
The frame test, app_type:
photo、music、video、alarm、weather、calender、setting、wizard
"""
def create_app(self, app_type) -> APPWizard:
app: typing.Optional[APPWizard] = None
# Start the Photo module's app
if app_type == 'traverse the wizard':
app = APPWizard()
return app
class TraverseWizard(object):
def __init__(self):
# Instantiate app
app_store = JointFrameWizardFrameStore()
order_app_dict = app_store.order_app('traverse the wizard')
# get frame driver
self.frame_driver = order_app_dict['driver']
self.app = order_app_dict['app']
def traverse_the_wizard_page(self):
self.app.traverse_the_wizard(self.frame_driver)
if __name__ == '__main__':
logger.info("The wizard app traversal begins")
TraverseWizard().traverse_the_wizard_page()
logger.info("The wizard app traversal ends")
| null |
jointframe/script_collection/traverse_wizard.py
|
traverse_wizard.py
|
py
| 1,171 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "attribute"
}
] |
369694246
|
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import re
import json
uri = "http://www.radiorecord.ru/player/"
prefix = "http://air.radiorecord.ru:805/"
suffix = "_320"
print("Please wait, making Web Scrapping from " + uri)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1500x1500")
chrome_driver = os.path.join(os.getcwd(), "chromedriver.exe")
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_driver)
driver.set_page_load_timeout(1000)
driver.set_script_timeout(1000)
driver.get(uri)
print(driver.title)
driver.save_screenshot("screenshot.png")
print("saved screenshot.png")
a = driver.find_element(by=By.XPATH, value="//div[@class='lists lists_stations']")
a = a.find_elements(by=By.XPATH, value="//a")
i = 1
ajson = []
for element in a:
attr = element.get_attribute("onclick")
if attr is not None:
if "onStation" in attr:
station_code = re.split("'", attr)[1]
station_name = element.find_element_by_id("station").find_element_by_id("station_title").text
station_uri = prefix + station_code + suffix
print(station_code + " : " + station_name + " (" + station_uri + ")")
ajson.append({"station_name": station_name, "station_code": station_code, "station_uri": station_uri})
i += 1
with open('stations.json', 'w', encoding='utf8') as outfile:
outfile.write(json.dumps({"stations": ajson}, ensure_ascii=False))
print("Scrapped "+str(i)+" stations! ")
print("Stations saved to stations")
| null |
radiorecord_to_json.py
|
radiorecord_to_json.py
|
py
| 1,671 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "re.split",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 42,
"usage_type": "call"
}
] |
303021081
|
#!/usr/bin/env python
#
# Author: Thamme Gowda [tg (at) isi (dot) edu]
# Created: 2019-11-12
from typing import List, Any, Iterable, Dict, Tuple
import collections as coll
from nlcodec import log
from tqdm import tqdm
def make_n_grams(sent: List[Any], n):
assert n > 0
return [tuple(sent[i: i + n]) for i in range(len(sent) - n + 1)]
def make_n_grams_all(sents: Iterable[List[Any]], n):
grams = coll.Counter()
n_sent = 0
for sent in tqdm(sents, mininterval=1, dynamic_ncols=True):
grams.update(make_n_grams(sent, n))
n_sent += 1
log.info(f"Made {n}-grams: types={len(grams)}; tokens={sum(grams.values())}")
return grams
def filter_types_coverage(types: Dict[str, int], coverage=1.0) -> Tuple[Dict[str, int], int]:
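# keep the most frequent types until their cumulative share of tokens reaches the requested coverage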
assert 0 < coverage <= 1
tot = sum(types.values())
includes = {}
cum = 0
types = sorted(types.items(), key=lambda x: x[1], reverse=True)
for t, f in types:
cum += f / tot
includes[t] = f
if cum >= coverage:
break
log.info(f'Coverage={cum:g}; requested={coverage:g}')
excludes = {ch: ct for ch, ct in types if ch not in includes}
unk_count = sum(excludes.values())
log.warning(f'UNKed total toks:{unk_count} types={len(excludes)} from types:{excludes}')
return includes, unk_count
| null |
nlcodec/utils.py
|
utils.py
|
py
| 1,325 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nlcodec.log.info",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nlcodec.log",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "nlcodec.log.info",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "nlcodec.log",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "nlcodec.log.warning",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "nlcodec.log",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 25,
"usage_type": "name"
}
] |
219200213
|
'''
Function:
Check that a dormitory code is valid.
{Input} dormitory code, website driver
{Output} error code, dormitory info dict, electricity info dict
The error code indicates the kind of dormitory-code error:
err=200: OK, dormDic and elecDic are valid
err=201: dormitory code has an invalid length, dormDic={}, elecDic={}
err=202: dormitory area is outside the service range, dormDic={}, elecDic={}
err=404: electricity query failed, either the site could not be opened or no such room exists, dormDic={}, elecDic={}
============================================================================
pakuzhou,2018-08-05
'''
import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
#-------------------------------------
from GetLeft import getLeft
def code2dic(code):
err = 0
dormDic = {}
if len(code) != 8:
return err+1,dormDic
louDic = {
"50":"博学",
"51":"凌云",
"52":"映雪",
"09":"号楼",#海韵1-8
"08":"海韵",
#-----------#
"01":"芙蓉",
"02":"石井",
"03":"南光",
#"04":"凌云",
"06":"新区",
"07":"丰庭",
#-----------#
"21":"博学",
"22":"囊莹",
"23":"笃行",
"24":"映雪",
"25":"勤业",
"27":"若谷",
"28":"凌云",
"29":"丰庭",
"30":"南安",
"31":"南光",
#"32":"漳校嘉庚若谷",#
"33":"翔安芙蓉",
"34":"翔安南安",
"35":"翔安南光",
"40":"DX",
"41":"FT",
"42":"GG"
}
# get the campus code and check whether the campus is within the service area
xq = code[0:2]
try:
louName = louDic[xq]
except:
return err+2,dormDic
# get the dormitory building code and assemble the building name
louCode = code[2:4]
# main-campus Furong or the Haibin new area
if xq == '01' or xq == '06':
if louCode == '01':
louCode = "一"
elif louCode == '02':
louCode = "二"
elif louCode == '03':
louCode = "三"
elif louCode == '04':
louCode = "四"
elif louCode == '05':
louCode = '五'
elif louCode == '06':
louCode = '六'
elif louCode == '07':
louCode = '七'
elif louCode == '08':
louCode = '八'
elif louCode == '09':
louCode = '九'
elif louCode == '10':
louCode = '十'
elif louCode == '11':
louCode = "11"
elif louCode == '12':
louCode = '12'
elif louCode == '13':
louCode = '十三'
else:
if louCode[0] == '0':
if xq == '08':
louCode = '09' # Haiyun 9 is named differently
else:
louCode = code[3]
else:
louCode = code[2:4]
if louName == '号楼':
lou = louCode+louName
else:
lou = louName+louCode
# Fengting 3 is called Duxing 3
if xq == '07' and code[2:4] == '03':
lou = '笃行3'
# Nanguang area
if xq == '03':
if code[2:4] == '16':
lou = '综合楼'
elif code[2:4] == '04':
lou = '南光四'
elif code[2:4] == '07':
lou = '南光七'
room = code[4:8]
dormDic['xq'] = xq
dormDic['lou'] = lou
dormDic['room'] = room
return err,dormDic
# only works on a plain dict
# def dic2code(dormDic):
# lou = re.findall('\d+',dormDic['lou'])
# if len(lou) == 1:
# lou = '0'+lou[0]
# code = dormDic['xq']+lou+dormDic['room']
# return code
def codeCheck(code):
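# validate the dorm code, then query the electricity site via a headless Chrome driver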
err = 200
elecDic = {}
p_err,dormDic = code2dic(code)
if p_err:
err = err+p_err
return err,dormDic,elecDic
else:
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get('http://elec.xmu.edu.cn/PdmlWebSetup/Pages/SMSMain.aspx')
g_err,elecDic = getLeft(dormDic,driver)
err = err+g_err
driver.close()
return err,dormDic,elecDic
if __name__ == '__main__':
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get('http://elec.xmu.edu.cn/PdmlWebSetup/Pages/SMSMain.aspx')
dormCodes = ['03160201','03040201','03070201','02030317','01090201','01110201','07020201','07030201','06010201','52061115']
for dormCode in dormCodes:
print('-------------------------')
err,dormDic = code2dic(dormCode)
print(err)
print(dormDic)
# print(elecDic)
driver.close()
| null |
H2X/PacooBot/CodeChecker.py
|
CodeChecker.py
|
py
| 4,748 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "GetLeft.getLeft",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 162,
"usage_type": "name"
}
] |
558412576
|
# A perfect power is a classification of positive integers:
#
# In mathematics, a perfect power is a positive integer that can be expressed as an integer power
# of another positive integer. More formally,
# n is a perfect power if there exist natural numbers m > 1, and k > 1 such that m^k = n.
#
# Your task is to check whether a given integer is a perfect power. If it is a perfect power,
# return a pair m and k with m^k = n as a proof. Otherwise return Nothing, Nil, null, NULL,
# None or your language's equivalent.
#
# Note: For a perfect power, there might be several pairs.
# For example 81 = 3^4 = 9^2, so (3,4) and (9,2) are valid solutions.
# However, the tests take care of this, so if a number is a perfect power, return any pair that proves it.
#
# Examples
# isPP(4) => [2,2]
# isPP(9) => [3,2]
# isPP(5) => None
from math import sqrt, log
from pip._vendor.msgpack.fallback import xrange
def is_pp(number):
res = []
for num in range(number):
for n in range(number):
if num ** n == number:
res.append((num, n))
return res
def is_pp2(number):
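# only bases up to sqrt(number) can work, and for each base the exponent must be round(log(number, base))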
for num in xrange(2, int(sqrt(number))+1):
n = int(round(log(number, num)))
if num ** n == number:
return [num, n]
return None
number = 81
print(is_pp(number))
print(is_pp2(number))
| null |
CodersWars/Whats_a_Perfect_Power_anyway.py
|
Whats_a_Perfect_Power_anyway.py
|
py
| 1,326 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pip._vendor.msgpack.fallback.xrange",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 35,
"usage_type": "call"
}
] |
605219316
|
from app.dataset import Dataset
from app.output import Output
from app.segmenter import Segmenter
from app.utils import Utils
from app.debug import Debug
import numpy as np
import cv2
dataset = Dataset()
dataset.load()
segmenter = Segmenter()
Output.clear()
for item in dataset.items:
print(item.file)
#if item.file != "008.jpg":
# continue
#
# run the segmentation algorithm
#
quad = segmenter.segment(item.img, item.distribution_rect)
#
# draw the result
#
# reference image
Output.write_image(item.file + "_0_norm.jpg", segmenter.img_normalized)
# preprocessed image
Output.write_image(item.file + "_1_preprop.jpg", segmenter.img_preprocessed)
# distance map
Output.write_image(item.file + "_2_distances.jpg", segmenter.distances_img)
if quad is None:
print("No region found.")
continue
# poly to quad process
Output.write_image(
item.file + "_3_region.jpg",
segmenter.draw_region_over(
segmenter.img_normalized,
segmenter.region
)
)
# poly to quad process
Output.write_image(item.file + "_4_quad.jpg", segmenter.img_quad)
# image with resulting quad only
Output.write_image(
item.file + "_5_result.jpg",
cv2.polylines(item.img, [quad.to_polyline()], True, (255, 0, 0), 10)
)
| null |
app/__main__.py
|
__main__.py
|
py
| 1,385 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "app.dataset.Dataset",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "app.segmenter.Segmenter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.output.Output.clear",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "app.output.Output.write_image",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "app.output.Output.write_image",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "app.output.Output.write_image",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "app.output.Output.write_image",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "app.output.Output.write_image",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "app.output.Output.write_image",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "app.output.Output",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "cv2.polylines",
"line_number": 59,
"usage_type": "call"
}
] |
328345788
|
"""PyTorch implementation of a ResNet with 2D CNNs
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock2D(nn.Module):
"""A basic residual block with 2D CNNs
Defines a convolutional ResNet block with the following architecture:
-- Shortcut Path -->
+-------- Shortcut Layer --------+
| |
X -> Conv2D -> Act -> Conv2D -> Sum -> Act -> Output
-- Main Path -->
The shortcut layer defaults to zero padding if the non-plane dimensions of
X do not change after convolutions. Otherwise, it defaults to a 2D
convolution to match dimensions.
"""
expansion = 1
def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=1, shortcut=None,
activation=F.relu):
"""
:param in_channels: The number of input channels (features)
:param out_channels: The number of output channels (features)
:param kernel_size: Width of the kernel used in the Conv2D convolution.
:param stride: Stride used in the Conv2D convolution.
:param shortcut: Callable function to for the shortcut path
"""
super(ResBlock2D, self).__init__()
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
self.activation = activation
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=(stride, 1),
padding=(kernel_size[0]//2, kernel_size[1]//2), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=(stride, 1),
padding=(kernel_size[0]//2, kernel_size[1]//2), bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.stride = stride
# Default zero padding shortcut
if shortcut is None and stride == 1:
self.shortcut = lambda x: F.pad(x, pad=(0, 0, 0, 0, 0, out_channels - x.shape[1], 0, 0))
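# F.pad pairs run from the last dimension backwards, so only the channel dimension is zero-padded here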
# Default conv1D shortcut
elif shortcut is None and stride != 1:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * out_channels))
# User defined shortcut
else:
self.shortcut = shortcut
def forward(self, x):
"""
:param x: A FloatTensor to propagate forward
:type x: torch.Tensor
:return:
"""
out = self.activation(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = self.activation(out)
return out
class ResNet2D(nn.Module):
def __init__(self, in_channels, block, num_blocks, init_channels=64, kernel_size=3, activation=F.relu):
"""
:param in_channels: The number of channels coming from the input.
:type in_channels: int
:param block: The type of residual block to use.
:type block: torch.nn.Module
:param num_blocks:
A list of the number of blocks per layer. Each layer increases the
number of channels by a factor of 2.
:type num_blocks: List[int]
:param init_channels: The number of channels the first 1D CNN should output.
Must be a power of 2.
:type init_channels: int
:param kernel_size: Size of the convolving kernel used in the Conv2D
convolution.
:type kernel_size: int
"""
super(ResNet2D, self).__init__()
# Check if the number of initial planes is a power of 2, done for faster computation on GPU
if not (init_channels != 0 and ((init_channels & (init_channels - 1)) == 0)):
raise ValueError('The initial number of planes must be a power of 2')
self.activation = activation
self.kernel_size = kernel_size
self.init_planes = init_channels
self.in_planes = self.init_planes # Number of input planes to the final layer
self.num_layers = len(num_blocks)
self.conv1 = nn.Conv2d(in_channels, self.in_planes, kernel_size=kernel_size,
stride=(1, 1), padding=(kernel_size//2, kernel_size//2),
bias=False)
self.bn1 = nn.BatchNorm2d(self.in_planes)
self.layers = []
# Raise the number of planes by a power of two for each layer
for i in range(0, self.num_layers):
new_layer = self._make_layer(block, int(self.init_planes * math.pow(2, i)),
num_blocks[i], stride=1, kernel_size=kernel_size)
self.layers.append(new_layer)
# Done to ensure layer information prints out when print() is called
setattr(self, 'layer{}'.format(i), new_layer)
def _make_layer(self, block, planes, num_blocks, stride, kernel_size):
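# only the first block of a layer uses the requested stride; the remaining blocks use stride 1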
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride=stride,
kernel_size=kernel_size))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.activation(self.bn1(self.conv1(x)))
for layer in self.layers:
out = layer(out)
return out
def ResNet2D18(in_channels, **kwargs):
return ResNet2D(in_channels, ResBlock2D, [2, 2, 2, 2], **kwargs)
def ResNet2D34(in_channels, **kwargs):
return ResNet2D(in_channels, ResBlock2D, [3, 4, 6, 3], **kwargs)
| null |
src/layers/ResNet2D.py
|
ResNet2D.py
|
py
| 5,909 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.pad",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "math.pow",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "name"
}
] |
325983612
|
"""An execution engine for Python processes."""
import copy
import inspect
import json
import os
import shlex
import shutil
from resolwe.flow.execution_engines.base import BaseExecutionEngine
from resolwe.flow.models.utils import (
get_collection_of_input_entities,
hydrate_input_references,
hydrate_input_uploads,
serialize_collection_relations,
)
from resolwe.process.parser import SafeParser
PYTHON_RUNTIME_DIRNAME = "python_runtime"
PYTHON_RUNTIME_ROOT = "/"
PYTHON_RUNTIME_VOLUME = os.path.join(PYTHON_RUNTIME_ROOT, PYTHON_RUNTIME_DIRNAME)
PYTHON_PROGRAM_ROOT = "/"
PYTHON_PROGRAM_FILENAME = "python_process.py"
PYTHON_PROGRAM_VOLUME = os.path.join(PYTHON_PROGRAM_ROOT, PYTHON_PROGRAM_FILENAME)
PYTHON_INPUTS_FILENAME = "inputs.json"
PYTHON_INPUTS_ROOT = "/"
PYTHON_INPUTS_VOLUME = os.path.join(PYTHON_INPUTS_ROOT, PYTHON_INPUTS_FILENAME)
PYTHON_RELATIONS_FILENAME = "relations.json"
PYTHON_RELATIONS_ROOT = "/"
PYTHON_RELATIONS_VOLUME = os.path.join(PYTHON_RELATIONS_ROOT, PYTHON_RELATIONS_FILENAME)
PYTHON_REQUIREMENTS_FILENAME = "requirements.json"
PYTHON_REQUIREMENTS_ROOT = "/"
PYTHON_REQUIREMENTS_VOLUME = os.path.join(
PYTHON_INPUTS_ROOT, PYTHON_REQUIREMENTS_FILENAME
)
class ExecutionEngine(BaseExecutionEngine):
"""An execution engine that outputs bash programs."""
name = "python"
def discover_process(self, path):
"""Perform process discovery in given path.
This method will be called during process registration and
should return a list of dictionaries with discovered process
schemas.
"""
if not path.lower().endswith(".py"):
return []
parser = SafeParser(open(path).read())
processes = parser.parse()
return [process.to_schema() for process in processes]
def evaluate(self, data):
"""Evaluate the code needed to compute a given Data object."""
return (
'PYTHONPATH="{runtime}" python3 -u -m resolwe.process {program} '
"--slug {slug} "
"--name {name} "
"--inputs {inputs} "
"--relations {relations} "
"--requirements {requirements}".format(
runtime=PYTHON_RUNTIME_VOLUME,
program=PYTHON_PROGRAM_VOLUME,
slug=shlex.quote(data.process.slug),
name=shlex.quote(data.name),
inputs=PYTHON_INPUTS_VOLUME,
relations=PYTHON_RELATIONS_VOLUME,
requirements=PYTHON_REQUIREMENTS_VOLUME,
)
)
def prepare_runtime(self, runtime_dir, data):
"""Prepare runtime directory."""
# Copy over Python process runtime (resolwe.process).
import resolwe.process as runtime_package
src_dir = os.path.dirname(inspect.getsourcefile(runtime_package))
dest_package_dir = os.path.join(
runtime_dir, PYTHON_RUNTIME_DIRNAME, "resolwe", "process"
)
shutil.copytree(src_dir, dest_package_dir)
os.chmod(dest_package_dir, 0o755)
# Write python source file.
source = data.process.run.get("program", "")
program_path = os.path.join(runtime_dir, PYTHON_PROGRAM_FILENAME)
with open(program_path, "w") as file:
file.write(source)
os.chmod(program_path, 0o755)
# Write serialized inputs.
inputs = copy.deepcopy(data.input)
hydrate_input_references(inputs, data.process.input_schema)
hydrate_input_uploads(inputs, data.process.input_schema)
inputs_path = os.path.join(runtime_dir, PYTHON_INPUTS_FILENAME)
# XXX: Skip serialization of LazyStorageJSON. We should support
# LazyStorageJSON in Python processes on the new communication protocol
def default(obj):
"""Get default value."""
class_name = obj.__class__.__name__
if class_name == "LazyStorageJSON":
return ""
raise TypeError(f"Object of type {class_name} is not JSON serializable")
with open(inputs_path, "w") as file:
json.dump(inputs, file, default=default)
# Write serialized requirements.
# Include special 'requirements' variable in the context.
requirements = copy.deepcopy(data.process.requirements)
# Inject default values and change resources according to
# the current Django configuration.
requirements["resources"] = data.process.get_resource_limits()
requirements_path = os.path.join(runtime_dir, PYTHON_REQUIREMENTS_FILENAME)
with open(requirements_path, "w") as file:
json.dump(requirements, file)
# Write serialized relations
relations = {}
if "relations" in requirements:
collection = get_collection_of_input_entities(data)
relations = serialize_collection_relations(collection)
relations_path = os.path.join(runtime_dir, PYTHON_RELATIONS_FILENAME)
with open(relations_path, "w") as file:
json.dump(relations, file)
# Generate volume maps required to expose needed files.
volume_maps = {
PYTHON_RUNTIME_DIRNAME: PYTHON_RUNTIME_VOLUME,
PYTHON_PROGRAM_FILENAME: PYTHON_PROGRAM_VOLUME,
PYTHON_INPUTS_FILENAME: PYTHON_INPUTS_VOLUME,
PYTHON_RELATIONS_FILENAME: PYTHON_RELATIONS_VOLUME,
PYTHON_REQUIREMENTS_FILENAME: PYTHON_REQUIREMENTS_VOLUME,
}
return volume_maps
| null |
resolwe/flow/execution_engines/python/__init__.py
|
__init__.py
|
py
| 5,492 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "resolwe.flow.execution_engines.base.BaseExecutionEngine",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "resolwe.process.parser.SafeParser",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "shlex.quote",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "shlex.quote",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "inspect.getsourcefile",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "resolwe.process",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "shutil.copytree",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.chmod",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.chmod",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "resolwe.flow.models.utils.hydrate_input_references",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "resolwe.flow.models.utils.hydrate_input_uploads",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "resolwe.flow.models.utils.get_collection_of_input_entities",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "resolwe.flow.models.utils.serialize_collection_relations",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 132,
"usage_type": "call"
}
] |
617140442
|
# Q: will the model be able to predict single traits at a time better than all 5 traits at the same time?
# train models on each trait
# only for face and bg. NOT all
import chainer
import numpy as np
from deepimpression2.model_59 import Deepimpression
import deepimpression2.constants as C
from chainer.functions import sigmoid_cross_entropy, mean_absolute_error, softmax_cross_entropy
from chainer.optimizers import Adam
import h5py as h5
import deepimpression2.paths as P
# import deepimpression2.chalearn20.data_utils as D
import deepimpression2.chalearn30.data_utils as D
import time
from chainer.backends.cuda import to_gpu, to_cpu
import deepimpression2.util as U
import os
import cupy as cp
from chainer.functions import expand_dims
from random import shuffle
my_model = Deepimpression()
load_model = True
if load_model:
p = os.path.join(P.MODELS, 'epoch_19_59_S')
chainer.serializers.load_npz(p, my_model)
print('model loaded')
continuefrom = 0
else:
continuefrom = 0
# optimizer = Adam(alpha=0.0002, beta1=0.5, beta2=0.999, eps=10e-8, weight_decay_rate=0.0001)
my_optimizer = Adam(alpha=0.0002, beta1=0.5, beta2=0.999, eps=10e-8)
my_optimizer.setup(my_model)
if C.ON_GPU:
my_model = my_model.to_gpu(device=C.DEVICE)
print('Initializing')
print('model initialized with %d parameters' % my_model.count_params())
# epochs = C.EPOCHS
epochs = 1
train_labels = h5.File(P.CHALEARN_TRAIN_LABELS_20, 'r')
val_labels = h5.File(P.CHALEARN_VAL_LABELS_20, 'r')
test_labels = h5.File(P.CHALEARN_TEST_LABELS_20, 'r')
train_loss = []
pred_diff_train = np.zeros((epochs, 1), float)
val_loss = []
pred_diff_val = np.zeros((epochs, 1), float)
test_loss = []
pred_diff_test = np.zeros((epochs, 1), float)
training_steps = len(train_labels) // C.TRAIN_BATCH_SIZE
val_steps = len(val_labels) // C.VAL_BATCH_SIZE
test_steps = len(test_labels) // C.TEST_BATCH_SIZE
# training_steps = 10
# val_steps = 10
id_frames = h5.File(P.NUM_FRAMES, 'r')
def run(which, steps, which_labels, frames, model, optimizer, pred_diff, loss_saving, which_data, trait, ordered=False,
save_all_results=False, record_predictions=False, record_loss=True):
print('steps: ', steps)
assert(which in ['train', 'test', 'val'])
assert(which_data in ['bg', 'face'])
assert(trait in ['O', 'C', 'E', 'A', 'S'])
if which == 'train':
which_batch_size = C.TRAIN_BATCH_SIZE
elif which == 'val':
which_batch_size = C.VAL_BATCH_SIZE
elif which == 'test':
which_batch_size = C.TEST_BATCH_SIZE
loss_tmp = []
pd_tmp = np.zeros((steps, 1), dtype=float)
_labs = list(which_labels)
preds = np.zeros((steps, 1), dtype=float)
if not ordered:
shuffle(_labs)
ts = time.time()
for s in range(steps):
# HERE
if which == 'test':
print(s)
# HERE
labels_selected = _labs[s * which_batch_size:(s + 1) * which_batch_size]
assert (len(labels_selected) == which_batch_size)
labels, data, _ = D.load_data_single(labels_selected, which_labels, frames, which_data, resize=True,
ordered=ordered, trait=trait)
if C.ON_GPU:
data = to_gpu(data, device=C.DEVICE)
labels = to_gpu(labels, device=C.DEVICE)
with cp.cuda.Device(C.DEVICE):
if which == 'train':
config = True
else:
config = False
with chainer.using_config('train', config):
if which == 'train':
model.cleargrads()
prediction, _ = model(data)
loss = mean_absolute_error(prediction, labels)
if which == 'train':
loss.backward()
optimizer.update()
if record_loss:
loss_tmp.append(float(loss.data))
pd_tmp[s] = U.pred_diff_trait(to_cpu(prediction.data), to_cpu(labels))
if record_predictions and which == 'test':
preds[s] = to_cpu(prediction.data)
if record_loss:
pred_diff[e] = np.mean(pd_tmp, axis=0)
loss_tmp_mean = np.mean(loss_tmp, axis=0)
loss_saving.append(loss_tmp_mean)
print('E %d. %s loss: ' %(e, which), loss_tmp_mean,
' pred diff %s: ' % trait, pred_diff[e],
' time: ', time.time() - ts)
U.record_loss_sanity(which, loss_tmp_mean, pred_diff[e])
if which == 'test' and save_all_results:
U.record_loss_all_test(loss_tmp, trait=True)
if record_predictions and which == 'test':
U.record_all_predictions(which, preds)
print('Enter training loop with validation')
for e in range(continuefrom, epochs):
which_trait = 'S' # O C E A S
train_on = 'bg'
validate_on = 'bg'
# print('trained on: %s val on: %s for trait %s' % (train_on, validate_on, which_trait))
test_on = 'bg'
print('trained on: %s test on %s for trait %s' % (train_on, test_on, which_trait))
# ----------------------------------------------------------------------------
# training
# ----------------------------------------------------------------------------
# run(which='train', steps=training_steps, which_labels=train_labels, frames=id_frames,
# model=my_model, optimizer=my_optimizer, pred_diff=pred_diff_train,
# loss_saving=train_loss, which_data=train_on, trait=which_trait)
# ----------------------------------------------------------------------------
# validation
# ----------------------------------------------------------------------------
# run(which='val', steps=val_steps, which_labels=val_labels, frames=id_frames,
# model=my_model, optimizer=my_optimizer, pred_diff=pred_diff_val,
# loss_saving=val_loss, which_data=validate_on, trait=which_trait)
# ----------------------------------------------------------------------------
# test
# ----------------------------------------------------------------------------
times = 1
for i in range(1):
if times == 1:
ordered = True
save_all_results = True
else:
ordered = False
save_all_results = False
run(which='test', steps=test_steps, which_labels=test_labels, frames=id_frames,
model=my_model, optimizer=my_optimizer, pred_diff=pred_diff_test,
loss_saving=test_loss, which_data=test_on, ordered=ordered, save_all_results=save_all_results,
trait=which_trait, record_loss=False, record_predictions=True)
# best val 'bg': epoch_59_60_O, epoch_79_60_C, epoch_89_60_E, epoch_89_60_A, epoch_89_60_S
# best val 'face' OCEAS: epoch_39_59_O, epoch_49_59_C, epoch_99_59_E, epoch_89_59_A, epoch_19_59_S
# save model
# if ((e + 1) % 10) == 0:
# name = os.path.join(P.MODELS, 'epoch_%d_60_%s' % (e, which_trait))
# chainer.serializers.save_npz(name, my_model)
| null |
training_single_trait_f_bg.py
|
training_single_trait_f_bg.py
|
py
| 6,954 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "deepimpression2.model_59.Deepimpression",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.paths.MODELS",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.paths",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "chainer.serializers.load_npz",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "chainer.serializers",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "chainer.optimizers.Adam",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "deepimpression2.constants.ON_GPU",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.DEVICE",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "deepimpression2.paths.CHALEARN_TRAIN_LABELS_20",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.paths",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "deepimpression2.paths.CHALEARN_VAL_LABELS_20",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.paths",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "deepimpression2.paths.CHALEARN_TEST_LABELS_20",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.paths",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "deepimpression2.constants.TRAIN_BATCH_SIZE",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.VAL_BATCH_SIZE",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.TEST_BATCH_SIZE",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "deepimpression2.paths.NUM_FRAMES",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.paths",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.TRAIN_BATCH_SIZE",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.VAL_BATCH_SIZE",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.TEST_BATCH_SIZE",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "deepimpression2.chalearn30.data_utils.load_data_single",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "deepimpression2.chalearn30.data_utils",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "deepimpression2.constants.ON_GPU",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "chainer.backends.cuda.to_gpu",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "deepimpression2.constants.DEVICE",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "chainer.backends.cuda.to_gpu",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "deepimpression2.constants.DEVICE",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "cupy.cuda.Device",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cupy.cuda",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants.DEVICE",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "deepimpression2.constants",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "chainer.using_config",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "chainer.functions.mean_absolute_error",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "deepimpression2.util.pred_diff_trait",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "deepimpression2.util",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "chainer.backends.cuda.to_cpu",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "chainer.backends.cuda.to_cpu",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "deepimpression2.util.record_loss_sanity",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "deepimpression2.util",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "deepimpression2.util.record_loss_all_test",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "deepimpression2.util",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "deepimpression2.util.record_all_predictions",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "deepimpression2.util",
"line_number": 146,
"usage_type": "name"
}
] |
187405588
|
from datetime import timedelta
from django.contrib import admin
from django.utils import timezone
class LastTestFilter(admin.SimpleListFilter):
title = 'last test'
parameter_name = 'last'
def lookups(self, request, model_admin):
return (
('T', 'Today'),
('W', 'Last 7 days'),
('MW', 'More than a week ago'),
('MM', 'More than a month ago'),
('MY', 'More than a year ago'),
)
def queryset(self, request, queryset):
if self.value() == 'T':
limit = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
return queryset.filter(measurement__finished__gte=limit)
elif self.value() == 'W':
limit = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=7)
return queryset.filter(measurement__finished__gte=limit)
elif self.value() == 'MW':
limit = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=7)
return queryset.exclude(measurement__finished__gte=limit)
elif self.value() == 'MM':
limit = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=30)
return queryset.exclude(measurement__finished__gte=limit)
elif self.value() == 'MY':
limit = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=365)
return queryset.exclude(measurement__finished__gte=limit)
else:
return queryset
class RetryFilter(admin.SimpleListFilter):
title = 'retry'
parameter_name = 'retry'
def lookups(self, request, model_admin):
return (
('Y', 'Is a retry'),
('N', 'Is not a retry'),
)
def queryset(self, request, queryset):
if self.value() == 'Y':
return queryset.exclude(retry_for=None)
elif self.value() == 'N':
return queryset.filter(retry_for=None)
else:
return queryset
class StateFilter(admin.SimpleListFilter):
title = 'state'
parameter_name = 'state'
def lookups(self, request, model_admin):
return (
('R', 'Requested'),
('S', 'Started'),
('F', 'Finished'),
)
def queryset(self, request, queryset):
if self.value() == 'R':
return queryset.filter(started=None, finished=None)
elif self.value() == 'S':
return queryset.exclude(started=None).filter(finished=None)
elif self.value() == 'F':
return queryset.exclude(finished=None)
else:
return queryset
def score_filter(attribute):
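# build a SimpleListFilter subclass for the given score attribute, bucketing it into untested/unreachable/poor/mediocre/good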
class ScoreFilter(admin.SimpleListFilter):
title = attribute.replace('_', ' ')
parameter_name = attribute
def lookups(self, request, model_admin):
return (
('N', 'Untested'),
('U', 'Unreachable'),
('B', 'Poor'),
('G', 'Mediocre'),
('P', 'Good'),
)
def queryset(self, request, queryset):
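            # Score buckets used below: None = untested, 0 = unreachable, (0, 0.8) = poor, [0.8, 0.95) = mediocre, >= 0.95 = good.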
if self.value() == 'N':
condition = {attribute: None}
elif self.value() == 'U':
condition = {attribute: 0}
elif self.value() == 'B':
condition = {attribute + '__gt': 0, attribute + '__lt': 0.8}
elif self.value() == 'G':
condition = {attribute + '__gte': 0.8, attribute + '__lt': 0.95}
elif self.value() == 'P':
condition = {attribute + '__gte': 0.95}
else:
return queryset
return queryset.filter(**condition)
return ScoreFilter
| null |
v6score/filter.py
|
filter.py
|
py
| 3,784 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.admin.SimpleListFilter",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.SimpleListFilter",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.SimpleListFilter",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.SimpleListFilter",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 82,
"usage_type": "name"
}
] |
198766481
|
import sys, warnings
import tweepy
from tw_auth import access_token_key, access_token_secret, consumer_key, consumer_secret
warnings.simplefilter ( 'ignore' )
auth = tweepy.OAuthHandler ( consumer_key, consumer_secret )
auth.set_access_token( access_token_key, access_token_secret )
api = tweepy.API ( auth )
tweet = sys.argv [1]
if len ( sys.argv ) > 2:
response_to = sys.argv [2]
else:
response_to = None
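# response_to, when provided, is the ID of the tweet this status replies to.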
print ( "Tweeting: %s" % tweet )
recents = api.update_status ( tweet, response_to )
| null |
tweet_twitter.py
|
tweet_twitter.py
|
py
| 503 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.simplefilter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tw_auth.consumer_key",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "tw_auth.consumer_secret",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "tw_auth.access_token_key",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "tw_auth.access_token_secret",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "tweepy.API",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
}
] |
615110955
|
import numpy as np
from sklearn.cluster import KMeans
import math
import time
nodesfile = open("dataset/cal.cnode","r")
edgesfile = open("dataset/cal.cedge","r")
nodes = []
edges = []
for node in nodesfile:
nodes.append(node.split())
for edge in edgesfile:
edges.append(edge.split())
newnodes = open("dataset/newcal.cnode","r")
trimmednodes = []
counter = 0
for node in newnodes:
trimmednodes.append(node.split())
counter += 1
X = np.array(trimmednodes)
def get_adjacent_nodes(node,edges):
adj_nodes = []
for edge in edges:
if(str(node) == edge[1]):
adj_nodes.append(edge[2])
elif(str(node) == edge[2]):
adj_nodes.append(edge[1])
return adj_nodes
def get_edge_dist(node1,node2):
for edge in edges:
if(str(node1) == edge[1]):
if(str(node2) == edge[2]):
return float(edge[3])
elif(str(node1) == edge[2]):
if(str(node2) == edge[1]):
return float(edge[3])
def get_cluster_of_node(clusters,node):
for cluster in range(len(clusters)):
if(int(node) in clusters[cluster]):
return cluster
def get_xy_node(mynode,nodes):
for node in nodes:
if(str(mynode) == node[0]):
return [node[1],node[2]]
def euclidean_distance(node1,node2,nodes):
xy1 = get_xy_node(node1,nodes)
xy2 = get_xy_node(node2,nodes)
return math.sqrt((float(xy1[0])-float(xy2[0]))**2 + (float(xy1[1])-float(xy2[1]))**2)
def get_clusters(k,array):
# k = 1345
km=KMeans(n_clusters=k)
km=km.fit(array)
cluster_labels=km.labels_ # get cluster label of all data
print("cluster labels of points:", cluster_labels)
clusters = []
for i in range(k):
clusters.append(np.where(cluster_labels==i)[0])
print("indexes of points in cluster ",i,":", np.where(cluster_labels==i)[0])
return clusters
def dist_bw_clusters(cluster1,cluster2,nodes,clusters):
min_distance = 9999999
for node1 in clusters[cluster1]:
for node2 in clusters[cluster2]:
dist = euclidean_distance(node1,node2,nodes)
if(dist < min_distance):
min_distance = dist
return min_distance
def astar(startnode,endnode,edges,nodes,clusters):
open_list = [startnode]
closed_list = []
parent = {}
fn = {}
gn = {}
hn = {}
gn[startnode] = 0
hn[startnode] = dist_bw_clusters(get_cluster_of_node(clusters,startnode),get_cluster_of_node(clusters,endnode),nodes,clusters)
fn[startnode] = gn[startnode] + hn[startnode]
# clusters = get_clusters(1345,edges)
while(len(open_list)!=0):
# print("Open: ",open_list)
# print("Closed",closed_list)
min_node = None
min_fn_value = 9999999
for node in open_list:
if(min_fn_value > fn[node]):
min_node = node
min_fn_value = fn[node]
closed_list.append(min_node)
open_list.remove(min_node)
adj_nodes2 = get_adjacent_nodes(min_node,edges)
for adj_node in adj_nodes2:
if(adj_node in closed_list):
pass
elif(adj_node not in open_list):
open_list.append(adj_node)
parent[adj_node] = min_node
gn[adj_node] = gn[min_node] + get_edge_dist(adj_node,min_node)
hn[adj_node] = dist_bw_clusters(get_cluster_of_node(clusters,adj_node),get_cluster_of_node(clusters,endnode),nodes,clusters)
fn[adj_node] = gn[adj_node] + hn[adj_node]
elif(adj_node in open_list):
if(gn[min_node] + get_edge_dist(adj_node,min_node)<gn[adj_node]):
parent[adj_node] = min_node
gn[adj_node] = gn[min_node] + get_edge_dist(adj_node,min_node)
hn[adj_node] = dist_bw_clusters(get_cluster_of_node(clusters,adj_node),get_cluster_of_node(clusters,endnode),nodes,clusters)
fn[adj_node] = gn[adj_node] + hn[adj_node]
if(endnode in closed_list):
break
if(len(open_list)==0):
print("Failed")
print("Distance value: ",fn[min_node]," degrees in latitude")
print("Route: ")
while(min_node != startnode):
print(min_node)
min_node = parent[min_node]
print(startnode)
clusters = get_clusters(1345,X)
# c1 = get_cluster_of_node(clusters,"326")
# c2 = get_cluster_of_node(clusters,"0")
# print(dist_bw_clusters(c1,c2,nodes,clusters))
start = time.time()
astar("0","277",edges,nodes,clusters)
end = time.time()
print("Execution time: ",(end-start))
| null |
main_with_kmeans_clustering.py
|
main_with_kmeans_clustering.py
|
py
| 4,034 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 140,
"usage_type": "call"
}
] |
107888745
|
from django.urls import path
from ordermanager.views import OrderView, PartnerOrders, BasketView
app_name = 'ordermanager'
urlpatterns = [
path('partner/orders', PartnerOrders.as_view(), name='partner-orders'),
path('order', OrderView.as_view(), name='order'),
path('basket', BasketView.as_view(), name='basket'),
]
| null |
ordermanager/urls.py
|
urls.py
|
py
| 332 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ordermanager.views.PartnerOrders.as_view",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ordermanager.views.PartnerOrders",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ordermanager.views.OrderView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ordermanager.views.OrderView",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ordermanager.views.BasketView.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ordermanager.views.BasketView",
"line_number": 11,
"usage_type": "name"
}
] |
568578492
|
import torch
import random
import os
import csv
import itertools
import typing
import numpy as np
def list_dir(root: str, prefix: bool = False) -> typing.List[str]:
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root: str, suffix: str, prefix: bool = False) -> typing.List[str]:
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def train_val_split(X: typing.List[typing.List[np.ndarray]], k_shot: int, shuffle: bool = True) -> typing.Tuple[np.ndarray, typing.List[int], np.ndarray, typing.List[int]]:
"""Split data into train and validation
Args:
X: a list of sub-list of numpy array.
Each sub-list consists of data belonging to the same class
k_shot: number of training data per class
shuffle: shuffle data before splitting
    Returns: a tuple (x_t, y_t, x_v, y_v) with the training data/labels and the validation data/labels
"""
# get information of image size
nc, iH, iW = X[0][0].shape
v_shot = len(X[0]) - k_shot
num_classes = len(X)
x_t = np.empty(shape=(num_classes, k_shot, nc, iH, iW))
x_v = np.empty(shape=(num_classes, v_shot, nc, iH, iW))
y_t = [0] * num_classes * k_shot
y_v = [0] * num_classes * v_shot
for cls_id in range(num_classes):
if shuffle:
random.shuffle(x=X[cls_id]) # in-place shuffle data within the same class
x_t[cls_id, :, :, :, :] = np.array(X[cls_id][:k_shot])
x_v[cls_id, :, :, :, :] = np.array(X[cls_id][k_shot:])
y_t[k_shot * cls_id: k_shot * (cls_id + 1)] = [cls_id] * k_shot
y_v[v_shot * cls_id: v_shot * (cls_id + 1)] = [cls_id] * v_shot
x_t = np.concatenate(x_t, axis=0)
x_v = np.concatenate(x_v, axis=0)
return x_t, y_t, x_v, y_v
def get_episodes(episode_file_path: typing.Optional[str] = None, num_episodes: int = 100) -> typing.List[str]:
"""Get episodes from a file
Args:
episode_file_path:
num_episodes: dummy variable in training to create an infinite
episode (str) generator. In testing, it defines how many
episodes to evaluate
Return: an episode (str) generator
"""
# get episode list if not None
if episode_file_path is not None:
episodes = []
with open(file=episode_file_path, mode='r') as f_csv:
csv_rd = csv.reader(f_csv, delimiter=',')
episodes = list(csv_rd)
else:
episodes = [None] * num_episodes
return episodes
def _weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
if m.weight is not None:
torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
torch.nn.init.zeros_(m.bias.data)
elif classname.find('BatchNorm') != -1:
if m.weight is not None:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0)
def euclidean_distance(matrixN: torch.Tensor, matrixM: torch.Tensor) -> torch.Tensor:
"""Calculate Euclidean distance from N points to M points
Args:
matrixN: an N x D matrix for N points
matrixM: a M x D matrix for M points
Returns: N x M matrix
"""
N = matrixN.size(0)
M = matrixM.size(0)
D = matrixN.size(1)
assert D == matrixM.size(1)
matrixN = matrixN.unsqueeze(1).expand(N, M, D)
matrixM = matrixM.unsqueeze(0).expand(N, M, D)
return torch.norm(input=matrixN - matrixM, p='fro', dim=2)
def get_cls_prototypes(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""Calculate the prototypes/centroids
Args:
x: input data
y: corresponding labels
Returns: a tensor of prototypes with shape (C, d),
where C is the number of classes, d is the embedding dimension
"""
_, d = x.shape
cls_idx = torch.unique(input=y, return_counts=False)
C = cls_idx.shape[0]
prototypes = torch.empty(size=(C, d), device=x.device)
for c in range(C):
prototypes[c, :] = torch.mean(input=x[y == cls_idx[c]], dim=0)
return prototypes
def kl_divergence_gaussians(p: typing.List[torch.Tensor], q: typing.List[torch.Tensor]) -> torch.Tensor:
"""Calculate KL divergence between 2 diagonal Gaussian
Args: each paramter is list with 1st half as mean, and the 2nd half is log_std
Returns: KL divergence
"""
assert len(p) == len(q)
n = len(p) // 2
kl_div = 0
for i in range(n):
p_mean = p[i]
p_log_std = p[n + i]
q_mean = q[i]
q_log_std = q[n + i]
s1_vec = torch.exp(input=2 * q_log_std)
mahalanobis = torch.sum(input=torch.square(input=p_mean - q_mean) / s1_vec)
tr_s1inv_s0 = torch.sum(input=torch.exp(input=2 * (p_log_std - q_log_std)))
log_det = 2 * torch.sum(input=q_log_std - p_log_std)
kl_div_temp = mahalanobis + tr_s1inv_s0 + log_det - torch.numel(p_mean)
kl_div_temp = kl_div_temp / 2
kl_div = kl_div + kl_div_temp
return kl_div
def vector_to_list_parameters(vec: torch.Tensor, parameter_shapes: typing.List) -> torch.Tensor:
"""
"""
params = []
# Pointer for slicing the vector for each parameter
pointer = 0
for param_shape in parameter_shapes:
# The length of the parameter
num_param = np.prod(a=param_shape)
params.append(vec[pointer:pointer + num_param].view(param_shape))
# Increment the pointer
pointer += num_param
return params
def intialize_parameters(state_dict: dict) -> typing.List[torch.Tensor]:
""""""
p = list(state_dict.values())
for m in p:
if m.ndim > 1:
torch.nn.init.kaiming_normal_(tensor=m, nonlinearity='relu')
else:
torch.nn.init.zeros_(tensor=m)
return p
| null |
_utils.py
|
_utils.py
|
py
| 6,821 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.expanduser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.kaiming_normal_",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.zeros_",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "torch.norm",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "torch.unique",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "torch.empty",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.square",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.numel",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "numpy.prod",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.kaiming_normal_",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.zeros_",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 198,
"usage_type": "attribute"
}
] |
521377830
|
import sys
import time
import traceback
from django import db
from common import constants, logging_util
from daemons.Daemon3 import Daemon
class ExampleDaemon3(Daemon):
def __init__(self, pidfile):
super().__init__(pidfile=pidfile)
self._daemon_name = "ExampleDaemon3"
self._logger = logging_util.get_logger(self._daemon_name)
def _run_task_one(self):
"""
Handle a daemon task.
"""
self._logger.info("_run_task_one")
def _run_task_two(self):
"""
Handle another daemon task.
"""
self._logger.info("_run_task_two")
def run(self):
"""
Controls the flow of the Daemon tasks.
"""
self._logger.debug("***> {0} Started <***".format(self._daemon_name))
while True:
self._logger.info("==> Start {0} Run <==".format(self._daemon_name))
try:
# Daemon task 1
self._run_task_one()
# Daemon task 2
self._run_task_two()
except db.utils.OperationalError as err:
self._logger.error("{0}\n{1}".format(type(err), traceback.format_exc()))
db.connection.close()
except Exception as err:
self._logger.critical("{0}\n{1}".format(type(err), traceback.format_exc()))
time.sleep(int(constants.DAEMON_SLEEP_TIME))
daemon = ExampleDaemon3(constants.PID_EXAMPLE_DAEMON)
# Check to see if we're running under the debugger,
# If we are then bypass the daemonize and just run directly.
if sys.gettrace() is not None:
daemon.run()
else:
daemon.perform_action()
| null |
daemons/example/ExampleDaemon3.py
|
ExampleDaemon3.py
|
py
| 1,666 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "daemons.Daemon3.Daemon",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "common.logging_util.get_logger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "common.logging_util",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.utils",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "traceback.format_exc",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.db.connection.close",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.db.connection",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "traceback.format_exc",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "common.constants.DAEMON_SLEEP_TIME",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "common.constants",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "common.constants.PID_EXAMPLE_DAEMON",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "common.constants",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "sys.gettrace",
"line_number": 54,
"usage_type": "call"
}
] |
448236936
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 12:10:30 2020
@author: parag.patil
"""
import pdftotext
import sys
import os
pwd=os.getcwd()
from pdf2image import convert_from_path
import ocrmypdf
import PIL
def extract_text(pdf_file,output_path):
try:
#print (f)
pages = convert_from_path(pdf_file,dpi=120)
images = pages
file="temp.pdf"
images[0].save(pwd+"/"+file, save_all=True, append_images=images[1:],quality=40, optimize=True)
ocrmypdf.ocr(pwd+"/"+file,pwd+"/"+file,use_threads=False,progress_bar=True) #,language="eng+hin")
with open(pwd+"/"+file, "rb") as f:
pdf = pdftotext.PDF(f)
os.remove(pwd+"/"+file)
text=""
for p in range(0,len(pdf)):
text=text+pdf[p]+"\n"
pdf_file = os.path.split(pdf_file)[-1]
filename, file_extension = os.path.splitext(pdf_file)
output=output_path+"/"+filename+".txt"
#print output
with open(output, 'w') as f:
for item in text.split('\n'):
f.write("%s\n" % item)
except Exception:
print("ignoring file ",file)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
pass
if __name__ == "__main__":
input_path = sys.argv[1]
output_path=sys.argv[2]
#os.chdir(input_path)
# input_path = "/run/user/1000/gvfs/smb-share:server=sbr-qnap-02,share=data_scientist/ALL_JUDGEMENTS/US_Courts_2/"
# output_path = "/home/ganeshkharad/New_Data_Scientist/1_JET/2_US/1_Extract/usa_text_c2/"
# pdf_list = os.listdir(input_path) #[f for f in glob.glob("*.pdf")]
pdf_list = list()
#processed_case_filenames = list()
dirName = input_path #"/home/ganeshkharad/Downloads/All_Judgements/UK/"
for (dirpath, dirnames, filenames) in os.walk(dirName):
pdf_list += [os.path.join(dirpath, file) for file in filenames]
i=0
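    # Walk the input directory, OCR each file found, and write one .txt per document to the output path.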
for pdf in pdf_list:
print (i,pdf)
try:
extract_text(pdf,output_path)
except:
pass
#os.remove(pdf)
i=i+1
print ("completed the conversion of pdf files")
| null |
pdf_to_text_conversion_OCR_eng_PP_20200213.py
|
pdf_to_text_conversion_OCR_eng_PP_20200213.py
|
py
| 2,450 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.getcwd",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pdf2image.convert_from_path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ocrmypdf.ocr",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pdftotext.PDF",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
}
] |
597790394
|
import os
import time
from gpiozero import Buzzer
buzzer = Buzzer(22)
def dot():
buzzer.on()
time.sleep(0.1)
buzzer.off()
time.sleep(0.2)
def dash():
buzzer.on()
time.sleep(0.3)
buzzer.off()
time.sleep(0.1)
def letterSpace():
time.sleep(0.2)
def wordSpace():
time.sleep(0.6)
def morseS():
dot()
dot()
dot()
def morseO():
dash()
dash()
dash()
os.system('clear')
print("Morse code")
loop_count = input("How many times would you like the SOS to loop? ")
loop_count = int(loop_count)
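# Beep out SOS (... --- ...) the requested number of times.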
while loop_count > 0:
morseS()
letterSpace()
morseO()
letterSpace()
morseS()
wordSpace()
loop_count = loop_count - 1
| null |
buzzer/buzzer.py
|
buzzer.py
|
py
| 632 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gpiozero.Buzzer",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 35,
"usage_type": "call"
}
] |
338873798
|
import itertools
a = [i for i in range(0,100)]
b = [i for i in range(0,100)]
# result contains all possible combinations.
combinations = list(itertools.product(a,b))
OPCODE_ADD = 1
OPCODE_MULTIPLY = 2
OPCODE_HALT = 99
def search_pair(positions, target):
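    # Brute-force search: try every (noun, verb) pair until running the program reproduces the target output.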
combo = 0
while combo < len(combinations):
cur = 0
# reset program memory
cur_positions = positions.copy()
noun, verb = combinations[combo]
if intcode(cur_positions, noun, verb) == target: break
combo += 1
return combinations[combo]
def intcode(positions, noun, verb):
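    # Run the Intcode program: opcode 1 adds, opcode 2 multiplies, opcode 99 halts; the result is read from positions[0].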
cur = 0
positions[1], positions[2] = noun, verb
while cur < len(positions):
opcode = positions[cur]
if opcode == OPCODE_HALT:
break
# print(cur, len(positions))
if opcode == OPCODE_ADD:
# print(positions[cur+3])
positions[positions[cur+3]] = positions[positions[cur+1]] + positions[positions[cur+2]]
elif opcode == OPCODE_MULTIPLY:
positions[positions[cur+3]] = positions[positions[cur+1]] * positions[positions[cur+2]]
cur += 4
return positions[0]
with open('input.txt','r') as f:
cur = 0
positions = f.read().split(',')
positions = list(map(int, positions))
# part 1
print('position at 0', intcode(positions.copy(), 12, 2))
# part 2
v1, v2 = search_pair(positions.copy(), 19690720)
print(v1, v2, intcode(positions.copy(), v1, v2))
| null |
day2/day2.py
|
day2.py
|
py
| 1,466 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "itertools.product",
"line_number": 6,
"usage_type": "call"
}
] |
191856066
|
#!/usr/bin/env python
#===============================================================================
# WriteFileInvMgr
#
# Description:
# Script to ...
#
# Print and FileWrite flags defaults can be changed in the main method
#
# Version:
# MM.mm DD/MM/YY
# 00.00 01/12/17 First version (combined scripts from ReadInvCTC,
# ReadInvEPN)
# 00.01 24/01/18 Update default dir_inv
# 00.02 05/02/18 Delete out-of-place/outdated buildSlotsShelf script
# 00.03 25/07/18 Add dictionary file-write script
# 00.04 01/04/19 Improve functionality of writePyDictFile script to
# make output appear more json-like.
# 00.05 26/07/19 Add checking and creation of PyLog directory
#
# Example/Usage:
#
#
#===============================================================================
from datetime import datetime
from datetime import date
import json
import os
#from tkinter.filedialog import askdirectory
def writePyOutputFile(dirInventory, fileName, listInventory, fileXt):
fileInvOutput = dirInventory + '/' + fileName + fileXt
with open(fileInvOutput,'w') as fileWrite:
for itemHardware in listInventory:
fileWrite.write('{}\n'.format('\t'.join(map(str,itemHardware))))
fileWrite.close()
writeLogMessage('File saved as: ' + fileInvOutput, dirInventory)
def writePyDictFile(dirInventory, fileName, dictInv, fileXt):
fileInvOutput = dirInventory + '/' + fileName + fileXt
with open(fileInvOutput,'w') as fileWrite:
fileWrite.write('{')
for ssp, itemHardware in dictInv.items():
fileWrite.write(str(ssp) + ': ')
if type(itemHardware) == dict:
keyInit = list(itemHardware.keys())[0]
keyLast = list(itemHardware.keys())[len(itemHardware.keys())-1]
fileWrite.write('{}\n'.format('\t{' + str(keyInit) + ':' + str(itemHardware.get(keyInit)) + ','))
for k, v in itemHardware.items():
if k != keyInit and k != keyLast:
fileWrite.write('{}\n'.format('\t' + str(k) + ':' + str(v) + ','))
fileWrite.write('{}\n'.format('\t' + str(keyLast) + ':' + str(itemHardware.get(keyLast)) + '},'))
else:
fileWrite.write('{}\n'.format(str(itemHardware)))
fileWrite.write('}')
fileWrite.close()
writeLogMessage('File saved as: ' + fileInvOutput, dirInventory)
def writePyJSONFile(dirInventory, fileName, dictInv, fileXt):
fileInvOutput = dirInventory + '/' + fileName + fileXt# + '.json'
with open(fileInvOutput, 'w') as fileWrite:
json.dump(dictInv, fileWrite)
fileWrite.close()
writeLogMessage('File saved as: ' + fileInvOutput, dirInventory)
def writePyFile(dirInventory, fileName, listInventory, fileXt):
fileInvOutput = dirInventory + '/' + fileName + fileXt
with open(fileInvOutput,'w') as fileWrite:
for itemHardware in listInventory:
fileWrite.write('{}\n'.format(itemHardware))
fileWrite.close()
writeLogMessage('File saved as: ' + fileInvOutput, dirInventory)
def writeLogMessage(text, dirInventory):
logStatement = 'Log %s>> %s' % (str(datetime.now()), text)
print(logStatement)
writeLogFile(logStatement, dirInventory)
def writeLogFile(logStatement, dirInventory=None):
fileStatement = 'Py' + logStatement
today = date.today()
# Set hard-coded default if none selected
if dirInventory is None or dirInventory == '':
dirInventory = 'C:/Users/beclarke/OneDrive - Cox Communications/Documents/Projects/HubInventory'
if os.path.isdir(dirInventory + '/PyLog/'):
pass
else:
dirInventory = os.getcwd()
if os.path.isdir(dirInventory + '/PyLog/'):
pass
else:
os.mkdir(dirInventory + '/PyLog/')
filext = '.txt'
logFile = dirInventory + '/PyLog/' + str(today) + filext
# try to open if exists
try:
with open(logFile, 'a') as f:
f.write('{}\n'.format(fileStatement))
f.close()
except:
print('Unable to write to PyLog file')
| null |
src/WriteFileInvMgr.py
|
WriteFileInvMgr.py
|
py
| 4,187 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.dump",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 100,
"usage_type": "call"
}
] |
384401339
|
import psycopg2 as pg
from StockDataWriter.infrastructure.StockDataQuery import PgStockQuery
class PostgresStockData:
cursor = None
conn = None
def connect_db(self, host, database, user):
"""
        Connect to the postgres db and allocate a cursor.
"""
self.conn = pg.connect(host=host, database=database, user=user)
self.cursor = self.conn.cursor()
def save_code(self, code, name):
query = PgStockQuery.save_item_code(code, name)
self.cursor.execute(query)
self.conn.commit()
def save(self, code, date, open, high, low, close, diff, volume):
"""
        Save the stock data.
"""
        # Build an insert query from the stock code and price data
query = PgStockQuery.save_stock_price(code,
date.date(),
open,
high,
low,
close,
diff,
volume)
try:
self.cursor.execute(query)
except pg.ProgrammingError as e:
if e.pgcode == '42P01':
                # If a "table does not exist" (42P01) exception occurs, create the table
                # and then run the insert query again
self.conn.commit()
self.cursor.execute(PgStockQuery.create_table(code))
self.cursor.execute(query)
self.conn.commit()
def get(self):
"""
        Fetch the stored information for each stock code.
:return:
"""
def __exit__(self, exc_type, exc_val, exc_tb):
if self.cursor is not None:
self.cursor.close()
| null |
StockDataWriter/repository/PostgresStockData.py
|
PostgresStockData.py
|
py
| 1,885 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "psycopg2.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "StockDataWriter.infrastructure.StockDataQuery.PgStockQuery.save_item_code",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "StockDataWriter.infrastructure.StockDataQuery.PgStockQuery",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "StockDataWriter.infrastructure.StockDataQuery.PgStockQuery.save_stock_price",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "StockDataWriter.infrastructure.StockDataQuery.PgStockQuery",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "psycopg2.ProgrammingError",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "StockDataWriter.infrastructure.StockDataQuery.PgStockQuery.create_table",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "StockDataWriter.infrastructure.StockDataQuery.PgStockQuery",
"line_number": 42,
"usage_type": "name"
}
] |
423721455
|
from torch_geometric.data import DataLoader
from ocpmodels.common.registry import registry
from ocpmodels.datasets.base import BaseDataset
@registry.register_dataset("iso17")
class ISO17(BaseDataset):
def __init__(self, config, transform=None, pre_transform=None):
super(ISO17, self).__init__(config, transform, pre_transform)
@property
def test_size(self):
if self.config["test_fold"] == "test_within":
return 101000
elif self.config["test_fold"] == "test_other":
return 130000
def get_dataloaders(self, batch_size=None):
assert batch_size is not None
assert self.train_size + self.val_size + 101000 + 130000 <= len(self)
if self.config["test_fold"] == "test_within":
test_dataset = self[-(101000 + 130000) : -130000]
elif self.config["test_fold"] == "test_other":
test_dataset = self[-130000:]
train_val_dataset = self[: self.train_size + self.val_size].shuffle()
train_loader = DataLoader(
train_val_dataset[: self.train_size],
batch_size=batch_size,
shuffle=True,
)
val_loader = DataLoader(
train_val_dataset[
self.train_size : self.train_size + self.val_size
],
batch_size=batch_size,
)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
return train_loader, val_loader, test_loader
| null |
ocpmodels/datasets/iso17.py
|
iso17.py
|
py
| 1,472 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ocpmodels.datasets.base.BaseDataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch_geometric.data.DataLoader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.DataLoader",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.DataLoader",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "ocpmodels.common.registry.registry.register_dataset",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "ocpmodels.common.registry.registry",
"line_number": 7,
"usage_type": "name"
}
] |
637050141
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The Support Vector Regression test for Features 2.
Using StandardScaler instead of MinMaxScaler.
Auto adjusting 3 parameters together.
Using features 2.
Logging y TWICE.
Version 2.3.2
Last Modified at
"""
### Import modules ###
import os
dir_sep_symbol = '\\' if os.name == 'nt' else '/'
## 'nt' stands for Windows, while 'posix' stands for Linux, Mac..
import sys
try:
my_home_path = os.path.split( os.path.realpath(__file__) )[0] + dir_sep_symbol + '..'
except NameError:
my_home_path = os.path.split( os.path.abspath('') )[0]
if os.name == 'nt':
desktop_path = os.environ.get('USERPROFILE') + dir_sep_symbol + 'Desktop'
else:
desktop_path = os.environ.get('HOME') + dir_sep_symbol + 'Desktop'
if my_home_path not in sys.path:
sys.path.insert(0, my_home_path)
try:
from const import const
except (ImportError, ModuleNotFoundError):
import const
const.colorR = 0
const.colorG = 1
const.colorB = 2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle as pkl
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from Stat.CoefficientOfDetermination import CoefficientOfDetermination as CofD
import time
################################
# Begin My Timer #
beginTime = time.perf_counter()
# Begin My Timer #
################################
_fi = 2 # features index.
_not_interactive_output_results = False
resultsFile = open('SVR_results.txt', 'w') if _not_interactive_output_results else sys.stdout
### Data Preparation ###
# beginTime = time.perf_counter()
with open(my_home_path + dir_sep_symbol + 'Features' +
dir_sep_symbol + 'Features%1dData.dat' % _fi, 'rb') as alldatain:
X_unscaled = pkl.load(alldatain)
y_unlogged = pkl.load(alldatain)
lenAll = pkl.load(alldatain)
## Acquire logarithm of y_unlogged ##
def LogDeal(array: np.ndarray) -> np.ndarray:
return 1000 * np.log10(array + 1)
def RevLogDeal(array: np.ndarray) -> np.ndarray:
return np.power(10, array / 1000) - 1
y = LogDeal(LogDeal(y_unlogged))
## Scale the data
scaler = StandardScaler()
X_train_unscaled, X_test_unscaled, y_train, y_test = train_test_split(X_unscaled, y, random_state = 1)
scaler.fit(X_train_unscaled)
X = scaler.transform(X_unscaled )
X_train = scaler.transform(X_train_unscaled)
X_test = scaler.transform(X_test_unscaled )
lenTrain = len(y_train)
lenTest = len(y_test )
print('Support Vector Regression Calculation.\n', file = resultsFile)
print('Scaler:', str(scaler.__class__)[8:-2], file = resultsFile)
print('Spliter:', 'Training:Testing = 75:25', 'random_state = 1', 'shuffle = False', 'stratify = False',
file = resultsFile)
resultsFile.write('\n\n')
#############################################
### Support Vector Regression Calculation ###
#############################################
demo = '''
rgs = SVR(kernel = 'rbf', gamma = 1., epsilon = 10., C = 100.)
print('Arguments: ', file = resultsFile)
print(rgs.fit(X_train, y_train), file = resultsFile)
predict = rgs.predict(X_test)
actual = y_test
print('Score:', rgs.score(X_test, y_test), file = resultsFile)
resultsFile.write('\n\n')
'''
### Adjust Parameters ###
## adjust gamma
_adjust_gamma = False
if _adjust_gamma:
gammaList = []
scoreList = []
for _gamma in np.arange(0.09, 0.14, 0.01):
rgs = SVR(kernel = 'rbf', gamma = _gamma, epsilon = 50., C = 1.5e4)
print('Arguments: ', file = resultsFile)
print(rgs.fit(X_train, y_train), file = resultsFile)
predict = rgs.predict(X_test)
actual = y_test
_score = CofD(RevLogDeal(RevLogDeal(predict)), RevLogDeal(RevLogDeal(actual)))
# _score = rgs.score(X_test, y_test)
print('Score:', _score, file = resultsFile)
resultsFile.write('\n\n')
gammaList.append(_gamma)
scoreList.append(_score)
fig_gamma = plt.figure(num = 'gamma', figsize = (12.8, 9.6))
ax_gamma = fig_gamma.add_subplot(111)
ax_gamma .set_title ('The Effect of gamma to Score')
ax_gamma .set_xlabel('gamma')
ax_gamma .set_ylabel('score')
ax_gamma .text(0.5, 0.5, 'epsilon = 50., C = 1.5e4',
horizontalalignment = 'center',
verticalalignment = 'center',
transform = ax_gamma.transAxes)
ax_gamma .plot(gammaList, scoreList, color = 'red', marker = 'o')
fig_gamma.savefig('gamma to Score.svg')
fig_gamma.savefig('gamma to Score.png')
print('\nBest gamma: %.1e\n' % gammaList[scoreList.index(max(scoreList))], file = resultsFile)
# gamma should be about 1.2e-01 in this case
## adjust epsilon
_adjust_epsilon = False
if _adjust_epsilon:
epsilonList = []
scoreList = []
for _epsilon in np.arange(20., 45., 5.):
rgs = SVR(kernel = 'rbf', gamma = 1.2e-01, epsilon = _epsilon, C = 1.5e4)
print('Arguments: ', file = resultsFile)
print(rgs.fit(X_train, y_train), file = resultsFile)
predict = rgs.predict(X_test)
actual = y_test
_score = CofD(RevLogDeal(RevLogDeal(predict)), RevLogDeal(RevLogDeal(actual)))
# _score = rgs.score(X_test, y_test)
print('Score:', _score, file = resultsFile)
resultsFile.write('\n\n')
epsilonList.append(_epsilon)
scoreList.append(_score)
fig_epsilon = plt.figure(num = 'epsilon', figsize = (12.8, 9.6))
ax_epsilon = fig_epsilon.add_subplot(111)
ax_epsilon .set_title ('The Effect of epsilon to Score')
ax_epsilon .set_xlabel('epsilon')
ax_epsilon .set_ylabel('score')
ax_epsilon .text(0.5, 0.5, 'gamma = 1.2e-1, C = 1.5e4',
horizontalalignment = 'center',
verticalalignment = 'center',
transform = ax_epsilon.transAxes)
ax_epsilon .plot(epsilonList, scoreList, color = 'green', marker = 'o')
fig_epsilon.savefig('epsilon to Score.svg')
fig_epsilon.savefig('epsilon to Score.png')
print('\nBest epsilon: %.1e\n' % epsilonList[scoreList.index(max(scoreList))], file = resultsFile)
# epsilon should be about 3.0e+01 in this case
## adjust C
_adjust_C = False
if _adjust_C:
CList = []
scoreList = []
for _C in np.arange(1.0e4, 2.0e4, 2e3):
rgs = SVR(kernel = 'rbf', gamma = 1.2e-01, epsilon = 3.0e+01, C = _C)
print('Arguments: ', file = resultsFile)
print(rgs.fit(X_train, y_train), file = resultsFile)
predict = rgs.predict(X_test)
actual = y_test
_score = CofD(RevLogDeal(RevLogDeal(predict)), RevLogDeal(RevLogDeal(actual)))
# _score = rgs.score(X_test, y_test)
print('Score:', _score, file = resultsFile)
resultsFile.write('\n\n')
CList.append(_C)
scoreList.append(_score)
fig_C = plt.figure(num = 'C', figsize = (12.8, 9.6))
ax_C = fig_C.add_subplot(111)
    ax_C .set_title ('The Effect of C to Score')
ax_C .set_xlabel('C')
ax_C .set_ylabel('score')
ax_C .text(0.5, 0.5, 'gamma = 1.2e-1, epsilon = 3.0e1.',
horizontalalignment = 'center',
verticalalignment = 'center',
transform = ax_C.transAxes)
ax_C .plot(CList, scoreList, color = 'blue', marker = 'o')
fig_C.savefig('C to Score.svg')
fig_C.savefig('C to Score.png')
print('\nBest C: %.1e\n' % CList[scoreList.index(max(scoreList))], file = resultsFile)
# C should be about 1.4e4. in this case
## adjust three parameters together
_adjust_3_together = False
if _adjust_3_together:
points = 5 # should be an odd
pointsAmount = points ** 3
halfSteps = (points - 1) // 2
gammaGuess = 1.2e-01
epsilonGuess = 3.0e+01
CGuess = 1.4e+04
gammaStep = np.power(10, np.floor(np.log10(gammaGuess) ) - 1) * 5.
epsilonStep = np.power(10, np.floor(np.log10(epsilonGuess)) - 1) * 5.
CStep = np.power(10, np.floor(np.log10(CGuess) ) - 1) * 5.
centerPosition = (pointsAmount - 1) // 2 # Start with 0.
loopIndex = 0
while 1:
loopIndex += 1
print('### Loop %d: ###\n' % loopIndex, file = resultsFile)
print('Chosen point: gamma = %.1e, epsilon = %.1e, C = %.1e'
% (gammaGuess, epsilonGuess, CGuess), file = resultsFile)
scoreList = np.zeros((pointsAmount,), np.float64)
gammaList = np.linspace(gammaGuess - halfSteps * gammaStep,
gammaGuess + halfSteps * gammaStep,
points)
epsilonList = np.linspace(epsilonGuess - halfSteps * epsilonStep,
epsilonGuess + halfSteps * epsilonStep,
points)
CList = np.linspace(CGuess - halfSteps * CStep,
CGuess + halfSteps * CStep,
points)
gammaAxis, epsilonAxis, CAxis = list(_.reshape((-1,))
for _ in np.meshgrid(gammaList,
epsilonList,
CList))
for _ in range(pointsAmount):
rgs = SVR(kernel = 'rbf', gamma = gammaAxis[_], epsilon = epsilonAxis[_], C = CAxis[_])
print('Point: %03d' % (_ + 1), file = resultsFile)
print('Arguments: ', file = resultsFile)
print(rgs.fit(X_train, y_train), file = resultsFile)
# _score = rgs.score(X_test, y_test)
predict = rgs.predict(X_test)
actual = y_test
_score = CofD(RevLogDeal(RevLogDeal(predict)), RevLogDeal(RevLogDeal(actual)))
print('Score:', _score, file = resultsFile)
scoreList[_] = _score
print('Progress: %4.1f%% in loop %d.'
% (round((_ + 1) / pointsAmount * 100, 1), loopIndex),
file = resultsFile)
print('', file = resultsFile)
chosenPosition = list(scoreList).index(max(scoreList))
print('', file = resultsFile)
if chosenPosition == centerPosition: break
gammaGuess = gammaAxis [chosenPosition]
epsilonGuess = epsilonAxis[chosenPosition]
CGuess = CAxis [chosenPosition]
gammaStepUnit = np.power(10, np.floor(np.log10(gammaStep )))
if gammaGuess + halfSteps * gammaStep > gammaStepUnit * 99.5: # Exciting!
gammaStep *= 10
gammaGuess = gammaStepUnit * 100.
elif gammaGuess - halfSteps * gammaStep < gammaStep:
gammaStep /= 10
gammaGuess -= halfSteps * gammaStep
epsilonStepUnit = np.power(10, np.floor(np.log10(epsilonStep)))
if epsilonGuess + halfSteps * epsilonStep > epsilonStepUnit * 99.5: # Exciting!
epsilonStep *= 10
epsilonGuess = epsilonStepUnit * 100.
elif epsilonGuess - halfSteps * epsilonStep < epsilonStep:
epsilonStep /= 10
epsilonGuess -= halfSteps * epsilonStep
CStepUnit = np.power(10, np.floor(np.log10(CStep )))
if CGuess + halfSteps * CStep > CStepUnit * 99.5: # Exciting!
CStep *= 10
CGuess = CStepUnit * 100.
elif CGuess - halfSteps * CStep < CStep:
CStep /= 10
CGuess -= halfSteps * CStep
_draw_adjust_3_together = False
if _draw_adjust_3_together:
scoreMax = max(scoreList)
scoreMin = min(scoreList)
scoreRange = scoreMax - scoreMin
colorAxis = np.array([[0., 0., 0.],] * len(gammaAxis))
for _ in range(len(gammaAxis)):
colorAxis[_][const.colorR] = (scoreList[_] - scoreMin) / scoreRange
colorAxis[_][const.colorB] = (scoreMax - scoreList[_]) / scoreRange
fig_3_together = plt.figure(num = '3 together', figsize = (12.8, 9.6))
ax_3_together = Axes3D(fig_3_together)
ax_3_together .set_title ('3 parameters')
ax_3_together .set_xlabel('gamma')
ax_3_together .set_ylabel('epsilon')
ax_3_together .set_zlabel('C')
ax_3_together .scatter([], [], [], c = '#FF0000', label = 'Best' )
ax_3_together .scatter([], [], [], c = '#0000FF', label = 'Worst')
ax_3_together .scatter(gammaAxis, epsilonAxis, CAxis, c = colorAxis)
ax_3_together .set_xlim3d(left = min(gammaList ), right = max(gammaList ))
ax_3_together .set_ylim3d(bottom = min(epsilonList), top = max(epsilonList))
ax_3_together .set_zlim3d(bottom = min(CList ), top = max(CList ))
ax_3_together .legend(loc = 'best')
fig_3_together.savefig('3 parameters.svg')
fig_3_together.savefig('3 parameters.png')
bestParaLoca = list(scoreList).index(max(scoreList))
    print('Best Choice: \ngamma = %.1e, epsilon = %.1e, C = %.1e at point %02d.'
% (gammaAxis[bestParaLoca],
epsilonAxis[bestParaLoca],
CAxis[bestParaLoca],
bestParaLoca + 1)
, file = resultsFile)
# gamma, epsilon and C should be about 4.6e-01, 3.2e+01, and 1.0e+04.
## Adjusting 3 parameters together using GridSearchCV
_GridSearch_3_together = False
if _GridSearch_3_together: pass
## choose the "best" parameters
_test_best_choise = False
if _test_best_choise:
rgs = SVR(kernel = 'rbf', gamma = 4.6e-01, epsilon = 3.2e+01, C = 1.0e+04)
print('Arguments: ', file = resultsFile)
print(rgs.fit(X_train, y_train), file = resultsFile)
predict = rgs.predict(X_test)
actual = y_test
print('Score :', rgs.score(X_test, y_test), file = resultsFile)
print('Score of origin:', CofD(RevLogDeal(RevLogDeal(predict)), RevLogDeal(RevLogDeal(actual))),
file = resultsFile)
resultsFile.write('\n\n')
with open(r'..\Stat\test1.pkl', 'wb') as tmp:
pkl.dump(predict, tmp)
pkl.dump(actual , tmp)
####################################################################################################
################################
# End My Timer #
endTime = time.perf_counter()
# End My Timer #
################################
elapsedTime = endTime - beginTime
print('Time elapsed: %2d min %.1f sec.' %
(elapsedTime // 60, elapsedTime - elapsedTime // 60 * 60)
, file = resultsFile)
### Make result file ###
_output_predict_and_actual_value = False
if _output_predict_and_actual_value:
with open(desktop_path + dir_sep_symbol +'SVR_data_comparation.txt', 'w') as tempfile:
for _ in range(lenTest):
print('Predict:', predict[_], file = tempfile)
print('Actual :', actual[_], file = tempfile)
tempfile.write('\n')
### show measurement ###
# print('Amount of support vectors:', rgs.support_.shape[0] , file = resultsFile)
### Plot to show the fitting result ###
_predict_actual_scatter = False
if _predict_actual_scatter:
fig1 = plt.figure(num = 'predict_actual_scatter', figsize = (12.8, 9.6))
ax1 = fig1.add_subplot(111)
ax1 .set_title ('Relationship between Predict Value and Actual Value')
ax1 .set_xlabel('Predict Value')
ax1 .set_ylabel('Actual Value' )
ax1 .set_ylim(top = max(max(predict), max(actual)) // 1000 * 1000 + 1000)
ax1 .scatter(predict, actual, marker = '.', c = 'black', s = 4)
fig1.savefig('predict_actual_scatter.svg')
fig1.savefig('predict_actual_scatter.png')
_predict_actual_plot = False
if _predict_actual_plot:
actual_sort_indices = np.argsort(actual, axis=0)
actual_sorted = actual [actual_sort_indices]
predict_sorted = predict[actual_sort_indices]
fig2 = plt.figure(num = 'predict_actual_plot', figsize = (12.8, 9.6))
ax2 = fig2.add_subplot(111)
ax2 .set_title ('Plot of Predict Value and Actual Value')
ax2 .set_xlabel('Samples')
ax2 .set_ylabel('Value' )
ax2 .set_ylim(top = max(max(predict), max(actual)) // 1000 * 1000 + 1000)
ax2 .plot(predict_sorted, color = 'magenta', linewidth = 1., label = 'Predict')
ax2 .plot(actual_sorted , color = 'cyan' , linewidth = 1., label = 'Actual' )
ax2 .legend(loc = 'best')
fig2.savefig('predict_actual_plot.svg')
fig2.savefig('predict_actual_plot.png')
### Close File ###
plt.show()
if resultsFile != sys.stdout: resultsFile.close()
### Test ###
if __name__ == '__main__': print('SVR Done.')
| null |
SVR/backup/models/StandardScaler/SVR_v2.3.2.py
|
SVR_v2.3.2.py
|
py
| 16,675 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.name",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "const.colorR",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "const.colorG",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "const.colorB",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "time.perf_counter",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.log10",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "numpy.power",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "Stat.CoefficientOfDetermination.CoefficientOfDetermination",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "Stat.CoefficientOfDetermination.CoefficientOfDetermination",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "Stat.CoefficientOfDetermination.CoefficientOfDetermination",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "numpy.power",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "Stat.CoefficientOfDetermination.CoefficientOfDetermination",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "const.colorR",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "const.colorB",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.mplot3d.Axes3D",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "Stat.CoefficientOfDetermination.CoefficientOfDetermination",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "numpy.argsort",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 418,
"usage_type": "attribute"
}
] |
100708399
|
from collections import deque
"""
1. We add entries to the deque. We could store only the index of each element (better for space),
   but this implementation stores (value, last-valid-window-index) pairs.
2. We keep only those entries in the deque that are still part of the current window.
3. We keep only useful entries in the deque:
   if the element corresponding to an entry in the deque is less than or equal to the current element,
   we remove that entry, because it can never be the maximum of the current window or of any later window.
The solution consists of two parts:
a) Up to index k-1 we only apply step 3 and then step 1.
b) From index k-1 to n-1 we record the first element of the deque (the window maximum), then apply step 2 -> step 3 -> step 1.
Time Complexity: O(n)
"""
def max_sliding_window(A, k):
D = deque()
res, i = [], 0
for i in range(len(A)):
# keeping only meaningful elements
while D and D[-1][0] <= A[i]:
D.pop()
D.append((A[i], i+k-1))
# Start adding the max element in window i.e first element in deque once the window is completed the first time
if i >= k-1:
res.append(D[0][0])
# if the first element in deque is gonna be out of window the next time then pop it now
if i == D[0][1]:
D.popleft()
return res
print(max_sliding_window((4,3,2,1,5,7,6,8,9), 3))
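# Editor's sketch: cross-check the deque-based solution against a brute-force
# O(n*k) maximum over every window; both calls should print the same list.
def max_sliding_window_bruteforce(A, k):
    return [max(A[i:i+k]) for i in range(len(A) - k + 1)]
print(max_sliding_window_bruteforce((4,3,2,1,5,7,6,8,9), 3))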
| null |
karumanchi/Queues/sliding_window.py
|
sliding_window.py
|
py
| 1,453 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.deque",
"line_number": 19,
"usage_type": "call"
}
] |
346556365
|
import numpy as np
from sklearn import preprocessing, neighbors, svm
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?',-99999,inplace=True)
# if we include id label, then we are almost at coin-flipping status
# because it is the most random thing in the dataset
df.drop(['id'],1,inplace=True)
x = np.array(df.drop(['class'],1)) # features
y = np.array(df['class']) # label (predicting class)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)
# SVC = support vector classifier
# it is a soft margin classifier, C is a parameter (slack coeff)
#
clf = svm.SVC(C = 1.0, # coefficient of slack
kernel = 'rbf', # which type of kernel to be used
degree = 3, # degree of the polynomial function
gamma = 'auto', # kernel coeff for 'rbf','poly','sigmoid', if gamma is 'auto' then 1/n_features will be used instead
coef0 = 0.0, # significant only in 'poly' and 'sigmoid'
probability = False, # to enable probability estimates or not
shrinking = True, # to use shrinking heuristic
tol = 1e-3, # tolerance for stopping criteria
verbose = False, # it takes advantage of per-process runtime setting in libsvm, it may not work properly in a multi-threaded context
max_iter = -1, # hard limit on iterations within solver
decision_function_shape = None # 'ovo','ovr' or None
)
# there are a lot of attributes for clf
# support_ = indices of support vectors
# n_support_ = number of support vectors for each class
# intercept_ = constants in decision function
clf.fit(x_train, y_train)
accuracy = clf.score(x_test, y_test)
print(accuracy)
example_measures = np.array([[4,2,1,1,1,2,3,2,1],[4,2,1,2,2,2,3,2,1]])
# it reshapes data according to what the classifier wants
# np.array(...).reshape({number of elements in the array}, {size of each element})
example_measures = example_measures.reshape(len(example_measures),-1)
prediction = clf.predict(example_measures)
print(prediction)
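# Editor's sketch: inspect the fitted-classifier attributes described in the comments
# above; these are standard attributes of a fitted sklearn SVC.
print('support vector counts per class:', clf.n_support_)
print('indices of the first support vectors:', clf.support_[:5])
print('decision function intercept:', clf.intercept_)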
| null |
Kernels/soft_margin_svm_training.py
|
soft_margin_svm_training.py
|
py
| 2,109 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
}
] |
289573296
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.junit_tests import JUnitTests
class JavaLibrary(ExportableJvmLibrary):
"""A Java library.
Normally has conceptually-related sources; invoking the ``compile`` goal
on this target compiles Java and generates classes. Invoking the ``jar``
goal on this target creates a ``.jar``; but that's an unusual thing to do.
Instead, a ``jvm_binary`` might depend on this library; that binary is a
more sensible thing to bundle.
:API: public
"""
default_sources_globs = '*.java'
default_sources_exclude_globs = JUnitTests.java_test_globs
@classmethod
def subsystems(cls):
return super(JavaLibrary, cls).subsystems()
def __init__(self, address=None, **kwargs):
super(JavaLibrary, self).__init__(address=address, **kwargs)
if 'scalac_plugins' in kwargs:
raise self.IllegalArgument(address.spec,
'java_library does not support the scalac_plugins argument.')
if 'scalac_plugin_args' in kwargs:
raise self.IllegalArgument(address.spec,
'java_library does not support the scalac_plugin_args argument.')
| null |
src/python/pants/backend/jvm/targets/java_library.py
|
java_library.py
|
py
| 1,523 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pants.backend.jvm.targets.exportable_jvm_library.ExportableJvmLibrary",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pants.backend.jvm.targets.junit_tests.JUnitTests.java_test_globs",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pants.backend.jvm.targets.junit_tests.JUnitTests",
"line_number": 25,
"usage_type": "name"
}
] |
442259758
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 11:04:34 2019
@author: Shihan Li
plot the geographic distribution of average precipitation (1850-1980 AD)
"""
#import packages
import cmocean
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
#readdata function
from netCDF4 import Dataset
def readdata(file_name):
file_path_re = r'D:\LifeInBremen\MODULES\Project\data\atmosphere'
b = '/'
file_path = file_path_re + b + file_name
file_obj = Dataset(file_path)
print(file_obj.variables.keys())
return file_obj
def static_plot(time_index, key_variable, lat, lon,
figsize, title1, title2, colorbar_title,figtitle):
#time_index: time interval filter conditions; key_variable: key variable for plotting
#lat, lon: coordinates from data
#figsize: define the size of final figure
#titles: title name for each element respectively
time_interval = time[time_index] #make the time interval
#pick up the key variable values for the corresponding time interval
key_variable_in_interval = key_variable[time_index[0][0]:time_index[0][-1]+1,:,:]
#calculate the average in this time interval
key_variable_ave = np.empty([len(lat),len(lon)])
for i in range(len(lat)):
for j in range(len(lon)):
key_variable_ave[i,j] = np.mean(key_variable_in_interval[:,i,j])
#calculate zonal average
key_variable_zonal = np.empty([len(time_interval),len(lat)])
for i in range(len(time_interval)):
for j in range(len(lat)):
key_variable_zonal[i,j] = np.mean(key_variable_in_interval[i,j,:])
upper_limitation = 400
lower_limitation = 0
#start plotting
fig = plt.figure(figsize = figsize)
#grid the fig space
grid = plt.GridSpec(1, 3, wspace=0.5, hspace=0.5)
#plot ax1, average in time-interval
ax1 = fig.add_subplot(grid[0,:2], projection=ccrs.PlateCarree())
ax1.coastlines()
#plot ax1 contourf map
contf = ax1.contourf(lon,lat,key_variable_ave,
levels = np.linspace(lower_limitation,upper_limitation,41),
cmap = cmocean.cm.rain,
extend = 'max',
projection=ccrs.PlateCarree())
#set title for ax1
ax1.set_title(title1 , fontweight = 'bold', fontsize = 20)
#add colorbar
cb1 = fig.colorbar(contf, ticks = np.linspace(lower_limitation, upper_limitation, 11), format = '%.0f',
orientation = 'horizontal',fraction=0.08, pad=0.12)
#set ax1.colorbar format
cb1.set_label(colorbar_title, fontsize = 16)
cb1.ax.tick_params(labelsize=16)
#set ax1 label's and tick's format
ax1.set_xticks([ -180, -120, -60,0, 60, 120, 180,], crs=ccrs.PlateCarree())
ax1.set_yticks([-90, -60, -30, 0, 30, 60, 90], crs=ccrs.PlateCarree())
ax1.tick_params(axis='both', labelsize=20)
#use the default format for axes
ax1.xaxis.set_major_formatter(LongitudeFormatter())
ax1.yaxis.set_major_formatter(LatitudeFormatter())
#plot ax2, zonal distribution for ax1
ax2 = fig.add_subplot(grid[0,2])
key_variable_ave_zonal = np.mean(key_variable_ave, axis = 1)
ax2.plot(key_variable_ave_zonal, lat, linewidth=3.0, color = 'b')
#set ax2 format
ax2.set_title(title2, fontweight = 'bold', fontsize = 20 )
ax2.set_ylabel('°N',fontsize = 16)
ax2.tick_params(labelsize=16)
ax2.set_xlabel("cm/year",fontsize = 16)
#save fig
fig.savefig(figtitle)
return fig
##read precipitation data
file_name = ('trace.01-36.22000BP.cam2.PRECT.22000BP_decavg_400BCE.nc')
data_prect = readdata(file_name)
lon = data_prect.variables['lon'][:]
lat = data_prect.variables['lat'][:]
time = data_prect.variables['time'][:]
#unit conversion constant
unit_conversion_constant = 60 * 60 * 24 *365 * 100
# convert unit from m/s to cm/year
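# (60*60*24*365 is the number of seconds per year, turning m/s into m/year; the factor 100 turns m into cm)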
prect = data_prect.variables['PRECT'][:]* unit_conversion_constant
#fill the data gap at 360° lon by appending another column to lon and
#copying the data at 0° into it
lon = np.append(lon, 360)
prect = np.dstack((prect,prect[:,:,0]))
#define the time interval: 1850 - 1980 AD
time_index = np.where(time>=-0.1)
figsize = (15,6)
title1 = 'a.Annual precipitation (1850-1980 AD)'
title2 = 'b.Zonal average precipitation'
colorbar_title = 'cm/year'
figtitle = 'prect_ave_1850_1980.png'
fig = static_plot(time_index, prect, lat, lon,
figsize, title1, title2, colorbar_title,figtitle)
| null |
scripts/f8_prect_ave_1850_1980.py
|
f8_prect_ave_1850_1980.py
|
py
| 4,557 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "netCDF4.Dataset",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.GridSpec",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cmocean.cm",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "cartopy.mpl.ticker.LongitudeFormatter",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "cartopy.mpl.ticker.LatitudeFormatter",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 108,
"usage_type": "call"
}
] |
604344337
|
""" Web runtime based on Selenium.
Selenium is a Python library to automate browsers.
"""
import os
import logging
from .common import WebRuntime
class SeleniumRuntime(WebRuntime):
""" Web runtime based on Selenium.
"""
def _launch(self):
# Get url and browser type
url = self._kwargs['url']
type = self._kwargs.get('browsertype', '')
# Import here; selenium is an optional dependency
from selenium import webdriver
# If that did not work, maybe we should try harder
# In particular on Windows, the exes may simply not be on the path
if type.lower() == 'firefox':
# from .xul import get_firefox_exe
# exe = get_firefox_exe()
# if exe:
# os.environ['PATH'] += os.pathsep + os.path.dirname(exe)
self._driver = webdriver.Firefox()
elif type.lower() == 'chrome':
# from .chromeapp import get_chrome_exe
# exe = get_chrome_exe() or 'google-chrome'
# if exe:
# os.environ['PATH'] += os.pathsep + os.path.dirname(exe)
self._driver = webdriver.Chrome()
elif type.lower() == 'ie':
from .mshtml import get_ie_exe
# exe = get_ie_exe()
# if exe:
# os.environ['PATH'] += os.pathsep + exe
self._driver = webdriver.Ie()
elif type:
classname = None
type2 = type[0].upper() + type[1:]
if hasattr(webdriver, type):
classname = type
elif hasattr(webdriver, type2):
classname = type2
if classname:
self._driver = getattr(webdriver, classname)()
else:
raise ValueError('Unknown Selenium browser type %r' % type)
else:
raise ValueError('Selenium runtime needs to know "browsertype".')
@property
def driver(self):
""" The Selenium webdriver object. Use this to control the browser.
"""
return self._driver
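# Editor's usage sketch (assumes the WebRuntime base class stores its keyword
# arguments in self._kwargs and then calls _launch(); that contract is not shown here):
#
#     rt = SeleniumRuntime(url='http://localhost:5000', browsertype='firefox')
#     rt.driver.get('http://localhost:5000')
#     rt.driver.quit()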
| null |
flexx/webruntime/selenium.py
|
selenium.py
|
py
| 2,160 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "common.WebRuntime",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Ie",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver",
"line_number": 58,
"usage_type": "name"
}
] |
305991789
|
#! /usr/bin/env python
# Copyright 2019 Peter Williams
# Licensed under the MIT License
"""Plot smoothed contours to see how well our algorithm works.
This uses `omegaplot`, a plotting package used by Peter Williams and pretty
much nobody else. Sorry.
"""
# note: Python 2.x compat not tested
from __future__ import absolute_import, division, print_function
import omega as om
import numpy as np
import postprocess
def main():
pg = om.makeDisplayPager()
for ident, flitem, geojson in postprocess.get_events():
for ftnum, feature in enumerate(geojson['features'][0]['geometry']['coordinates']):
x_orig = np.array([t[0] for t in feature])
y_orig = np.array([t[1] for t in feature])
area, x, y = postprocess.fix_polygon_handedness(x_orig, y_orig)
if area < postprocess.AREA_CUTOFF_68:
print('skipping %s+%d: A=%.1f' % (ident, ftnum + 1, area))
continue
x, y = postprocess.smooth_polygon(x, y)
p = om.quickXY(x_orig, y_orig, 'Original %s+%d: A=%.1f' % (ident, ftnum + 1, area))
p.addXY(x, y, 'Smoothed')
pg.send(p)
pg.done()
if __name__ == '__main__':
main()
| null |
bootstrap/check-smoothing.py
|
check-smoothing.py
|
py
| 1,222 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "omega.makeDisplayPager",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "postprocess.get_events",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "postprocess.fix_polygon_handedness",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "postprocess.AREA_CUTOFF_68",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "postprocess.smooth_polygon",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "omega.quickXY",
"line_number": 36,
"usage_type": "call"
}
] |
418204519
|
import os
import numpy as np
import json
import glob
import torch
import viz
import utils
from imagenet1000_clsid_to_human import clsid_to_human
from explainers import forward
from create_explainer import get_explainer
from preprocess import get_preprocess, get_normalize_preprocess
import copy
from collections import Iterable
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms
import torch.nn.functional as F
import torchvision
from torchvision import models
from explainers import CASO, VanillaGradExplainer, IntegrateGradExplainer, \
SmoothGradExplainer
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import pylab as P
transf = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
def setup_imagenet(batch_size=16, example_ids=None,
n_batches=-1, n_examples=-1,
shuffle=True, dump_name=None,
arch='resnet50'):
model = utils.load_model(arch)
model.eval()
model.cuda()
print('model loaded')
home_dir = 'C:/Users/singl/Desktop/Hessian_idea/ILSVRC_val/'
image_path = 'C:/Users/singl/Desktop/Hessian_idea/ILSVRC_val/**/*.JPEG'
image_files = list(glob.iglob(image_path, recursive=True))
# print(len(image_files))
image_files = sorted(image_files, key=lambda x: os.path.basename(x))
real_ids = [os.path.basename(x) for x in image_files]
label_path = 'C:/Users/singl/Desktop/Hessian_idea/ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt'
with open(label_path) as f:
labels = [clsid_to_human[int(x)-1] for x in f.readlines()]
if example_ids is not None:
examples = {r: (r, m, l)
for r, m, l in zip(real_ids, image_files, labels)}
examples = [examples[x] for x in example_ids]
else:
examples = list(zip(real_ids, image_files, labels))
if shuffle:
np.random.seed(0)
np.random.shuffle(examples)
if n_examples > 0:
examples = examples[:n_examples]
elif n_batches > 0:
examples = examples[:batch_size * n_batches]
else:
print('using all images')
selected_files = sorted([x[0] for x in examples])
if dump_name is not None:
with open(dump_name, 'w') as f:
f.write(json.dumps(selected_files))
# print('\n'.join(selected_files))
def batch_loader(batch):
batch = list(map(list, zip(*batch)))
ids, xs, ys = batch
return (ids, [viz.pil_loader(x) for x in xs], ys)
batch_indices = list(range(0, len(examples), batch_size))
batches = [examples[i: i + batch_size] for i in batch_indices]
batches = map(batch_loader, batches)
# print('image loaded', len(batch_indices))
return model, batches
def perturb(model, X, y=None, epsilon=2.0/255.0, protected=None):
logits = forward(model, X)
if y is None:
y = logits.max(1)[1]
loss = F.cross_entropy(logits, y)
x_grad, = torch.autograd.grad(loss, X)
grad_sign = x_grad.cpu().sign().numpy()
batch_size, channels, height, width = X.shape
total_saliency = protected.reshape(batch_size, -1).sum(1)[0]
budget = (channels * height * width * epsilon)
if total_saliency > 0:
perturb_mag = budget/total_saliency
else:
perturb_mag = 0
protected = np.repeat(protected[:, np.newaxis, :, :], 3, axis=1)
perturbation = grad_sign * protected * perturb_mag
perturbed_X = X.data.cpu().numpy() + perturbation
perturbed_X = np.clip(perturbed_X, 0, 1)
X = Variable(torch.from_numpy(perturbed_X).cuda(), requires_grad = True).float()
return X
def important_pixels(saliency, cutoff = 10):
if cutoff == 0:
return np.zeros_like(saliency)
if cutoff == 100:
return np.ones_like(saliency)
batch_size, height, width = saliency.shape
saliency = np.abs(saliency)
saliency = saliency.reshape(batch_size, -1)
vmax = np.percentile(saliency, cutoff, axis=1, keepdims=True)
protected_region = saliency <= vmax
protected_region = protected_region.reshape(batch_size, height, width)
assert protected_region.shape == (batch_size, height, width)
return protected_region
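# Worked example (editor's note): for a 1x2x2 saliency map [[[0.1, 0.9], [0.4, 0.6]]]
# and cutoff=50, np.percentile over the flattened absolute values gives vmax = 0.5,
# so the returned mask protects the two low-saliency pixels (0.1 and 0.4) while the
# 0.9 and 0.6 pixels stay exposed to the perturbation applied in perturb().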
if __name__ == '__main__':
dataset = 'imagenet'
explainers = [
# ('Random', None),
('Vanilla', VanillaGradExplainer()),
('SmoothGrad', SmoothGradExplainer()),
('IntegratedGrad', IntegrateGradExplainer()),
('CAFO', CASO(second_order=False, smooth=False)),
('CASO', CASO(second_order=True, smooth=False)),
('SmoothCAFO', CASO(second_order=False, smooth=True)),
('SmoothCASO', CASO(second_order=True, smooth=True)),
]
cutoff_scores = dict()
for method_name, explainer in explainers:
cutoff_scores[method_name] = [0] * 11
cutoffs = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
num_images = 10
batch_size = 1
attack_method = 'scaled'
with open('ghorbani.json') as f:
example_ids = json.load(f)
print(len(example_ids))
# example_ids = [example_ids[2]]
ids = [3, 8, 13, 26, 30, 33, 36, 37, 53, 71, 86, 96, 113, 114, 115, 117, 129, 131, 147]
# ids += [156, 158, 159, 177, 194, 199, 207, 208, 219, 230, 248, 255, 261, 262, 264, 278]
# ids += [285, 288, 320, 327, 328, 332, 335, 352, 355, 359, 361, 386, 394, 399, 408, 421]
# ids += [426, 429, 443, 454, 458, 459, 466, 467, 476, 480, 488, 491, 499, 502, 528, 530]
# ids += [531, 538, 543, 552, 554, 556, 561, 567, 586, 588, 599, 603, 619, 624, 629, 635]
# ids += [648, 681, 685, 705, 717, 720, 743, 761, 762, 771, 776, 777, 781, 785, 797, 821]
# ids += [832, 833, 841, 853, 862, 873, 876, 879, 889, 902, 945, 946, 948, 983, 987]
print(len(ids))
example_ids = [example_ids[i] for i in ids]
batch_size = 1
assert batch_size==1
model, batches = setup_imagenet(batch_size=batch_size, example_ids=example_ids)
batches = list(batches)
for i, batch in enumerate(batches):
# print(len(batch))
# print(torch.max(batch.view(batch.shape[0], -1), 1)[0])
# print(torch.min(batch.view(batch.shape[0], -1), 1)[0])
xs = Variable(torch.stack([transf(x) for x in batch[1]]).cuda(), requires_grad=True)
# xs = Variable(batch.cuda(), requires_grad=True)
logits = forward(model, xs)
prediction = logits.max(1, keepdim=True)[1]
print(str(i*batch_size))
for method_name, explainer in explainers:
print('start ' + method_name)
if method_name == "Random":
saliency = torch.from_numpy(np.random.rand(*xs.shape)).float()
else:
saliency = explainer.explain(model, xs)
print('done ' + method_name)
saliency = viz.VisualizeImageGrayscale(saliency.cpu())
for cutoff in cutoffs:
protected_region = important_pixels(saliency.cpu().numpy(), cutoff=cutoff)
adversarial_image = perturb(model, xs, protected=protected_region)
adversarial_prediction = forward(model, adversarial_image).max(1, keepdim=True)[1]
correct = prediction.eq(adversarial_prediction).sum().cpu().data.numpy()
cutoff_scores[method_name][int(cutoff/10)] += (correct/num_images)
# continue
# plt.figure(figsize=(25, 15))
# plt.subplot(3, 5, 1)
markers = ['-go', '-bv', '-ro', '-c.', '-m+', '-y^', '-gs', '-bD']
for i, explainer in enumerate(explainers):
explainer_name = explainer[0]
plt.plot(cutoffs, cutoff_scores[explainer_name], markers[i], label=explainer_name)
plt.xlabel('Percentage of Pixels Attacked')
plt.ylabel('Accuracy')
plt.title(dataset)
plt.legend()
plt.savefig(dataset + '_pgd_attack.pdf')
plt.close()
# raw_img = np.swapaxes(raw_img, 1,2)
# plt.imshow(np.swapaxes(raw_img, 0,2))
# plt.axis('off')
# plt.title('Dog')
# for i, saliency in enumerate(all_saliency_maps):
# plt.subplot(3, 5, i + 2 + i // 4)
# saliency = saliency[0]
# saliency = np.swapaxes(saliency, 1,2)
# plt.imshow(np.swapaxes(saliency, 0,2))#, cmap=P.cm.gray, vmin=0, vmax=1)
# plt.axis('off')
# #plt.title("")
# plt.tight_layout()
# plt.savefig('output/protected_attack.png')
with open("protected_results.txt", "w") as text_file:
text_file.write(str(method_name) + '\n')
for cutoff in cutoffs:
text_file.write('\n' + str(cutoff) + '\n' +'\n')
print("Adversary Can Modify: ", cutoff)
for method_name, explainer in explainers:
print(method_name, cutoff_scores[method_name][int(cutoff/10)])
text_file.write(str(method_name) + '\n')
text_file.write(str(cutoff_scores[method_name][int(cutoff/10)]) + '\n')
| null |
protected_attack_redo.py
|
protected_attack_redo.py
|
py
| 9,157 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "utils.load_model",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "glob.iglob",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "imagenet1000_clsid_to_human.clsid_to_human",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "viz.pil_loader",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "explainers.forward",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "torch.autograd.grad",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.repeat",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "explainers.VanillaGradExplainer",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "explainers.SmoothGradExplainer",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "explainers.IntegrateGradExplainer",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "explainers.CASO",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "explainers.CASO",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "explainers.CASO",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "explainers.CASO",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "explainers.forward",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "viz.VisualizeImageGrayscale",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "explainers.forward",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 204,
"usage_type": "name"
}
] |
184775157
|
"""Template tags used to sort a queryset.
"""
from django import template
from django.http import Http404
from django.conf import settings
register = template.Library()
DEFAULT_SORT_UP = getattr(settings, 'DEFAULT_SORT_UP' , '↑')
DEFAULT_SORT_DOWN = getattr(settings, 'DEFAULT_SORT_DOWN' , '↓')
INVALID_FIELD_RAISES_404 = getattr(settings,
'SORTING_INVALID_FIELD_RAISES_404' , False)
sort_directions = {
'asc': {'icon':DEFAULT_SORT_UP, 'inverse': 'desc'},
'desc': {'icon':DEFAULT_SORT_DOWN, 'inverse': 'asc'},
'': {'icon':DEFAULT_SORT_DOWN, 'inverse': 'asc'},
}
def anchor(parser, token):
"""
Parses a tag that's supposed to be in this format:
{% anchor field title anchor_class anchor_rel %}
where the 'title', 'anchor_class' and 'anchor_rel' arguments are optional.
"""
bits = [b.strip('"\'') for b in token.split_contents()]
if len(bits) < 2:
raise template.TemplateSyntaxError(
"anchor tag takes at least 1 argument")
try:
title = bits[2]
except IndexError:
title = bits[1].capitalize()
if len(bits) >= 4:
# User specified the anchor_class and anchor_rel arguments
anchor_class = bits[len(bits)-2]
anchor_rel = bits[len(bits)-1]
return SortAnchorNode(bits[1].strip(), title.strip(),
anchor_class.strip(), anchor_rel.strip())
return SortAnchorNode(bits[1].strip(), title.strip())
class SortAnchorNode(template.Node):
"""
Renders an <a> HTML tag whose href attribute includes the field
on which we sort and the direction, and adds an up or down arrow
if the field is the one currently being sorted on.
Eg.
{% anchor name Name %} generates
<a href="/the/current/path/?sort=name" title="Name">Name</a>
"""
def __init__(self, field, title, anchor_class=None, anchor_rel=None):
self.field = field
self.title = title
self.anchor_class = ""
self.anchor_rel = ""
if anchor_class is not None:
self.anchor_class = ' class="%s"' % anchor_class
if anchor_rel is not None:
self.anchor_rel = ' rel="%s"' % anchor_rel
def render(self, context):
request = context['request']
getvars = request.GET.copy()
if 'sort' in getvars:
sortby = getvars['sort']
del getvars['sort']
else:
sortby = ''
if 'dir' in getvars:
sortdir = getvars['dir']
del getvars['dir']
else:
sortdir = ''
if sortby == self.field:
getvars['dir'] = sort_directions[sortdir]['inverse']
icon = sort_directions[sortdir]['icon']
else:
icon = ''
if len(getvars.keys()) > 0:
urlappend = "&%s" % getvars.urlencode()
else:
urlappend = ''
if icon:
title = "%s %s" % (self.title, icon)
else:
title = self.title
url = '%s?sort=%s%s' % (request.path, self.field, urlappend)
return '<a href="%s" title="%s"%s%s>%s</a>' \
% (url, self.title, self.anchor_class, self.anchor_rel, title)
def autosort(parser, token):
bits = [b.strip('"\'') for b in token.split_contents()]
if len(bits) != 2:
raise template.TemplateSyntaxError(
"autosort tag takes exactly one argument")
return SortedDataNode(bits[1])
def attribute_lookup_factory(order_by):
'''Returns a function performing a django-db-like attribute lookup.
Example: order_by='author__name' builds a callable that resolves obj['author'].name,
obj.author.name or obj.author().name, depending on what each lookup step exposes.
'''
keys = order_by.split('__')
def attribute_lookup(obj):
parent_obj = obj
attr_obj = None
for key in keys:
# lookup parameters order:
# 1. dictionary lookup
# 2. method call
# 3. attribute
if hasattr(parent_obj, '__iter__'):
if key in parent_obj:
attr_obj = parent_obj[key]
parent_obj = attr_obj
elif hasattr(parent_obj, key):
attr_obj = getattr(parent_obj, key)
if hasattr(attr_obj, '__call__'):
if not getattr(attr_obj, 'alters_data', False):
parent_obj = attr_obj()
else:
parent_obj = attr_obj
else:
break
return parent_obj
return attribute_lookup
class SortedDataNode(template.Node):
"""
Automatically sort a queryset with {% autosort queryset %}
"""
def __init__(self, queryset_var, context_var=None):
self.queryset_var = template.Variable(queryset_var)
self.context_var = context_var
def render(self, context):
key = self.queryset_var.var
value = self.queryset_var.resolve(context)
order_by = context['request'].field
if len(order_by) > 1:
try:
context[key] = value.order_by(order_by)
except AttributeError:
if order_by[0] == '-':
reverse = True
order_by = order_by[1:]
else:
reverse = False
context[key] = sorted(value, key=attribute_lookup_factory(order_by),
reverse=reverse)
except template.TemplateSyntaxError:
if INVALID_FIELD_RAISES_404:
raise Http404(
'Invalid field sorting. If DEBUG were set to False, '
'an HTTP 404 page would have been shown instead.')
context[key] = value
else:
context[key] = value
return u''
def anchor(context, field, title, anchor_class=None, anchor_rel=None):
request = context['request']
getvars = request.GET.copy()
if 'sort' in getvars:
sortby = getvars['sort']
del getvars['sort']
else:
sortby = ''
if 'dir' in getvars:
sortdir = getvars['dir']
del getvars['dir']
else:
sortdir = ''
if sortby == field:
getvars['dir'] = sort_directions[sortdir]['inverse']
icon = sort_directions[sortdir]['icon']
else:
icon = ''
if len(getvars.keys()) > 0:
urlappend = "&%s" % getvars.urlencode()
else:
urlappend = ''
reverse = sort_directions[sortdir]['inverse']
url = '%s?sort=%s%s' % (request.path, field, urlappend)
return {
'url': url,
'title': title,
'icon': icon,
'inverse_icon': '' if icon == '' else sort_directions[reverse]['icon'],
'class': anchor_class,
'rel': anchor_rel,
'urlappend': urlappend,
'sortdir': sortdir,
}
anchor = register.tag(anchor)
autosort = register.tag(autosort)
register.inclusion_tag('django_sorting/anchor.html', takes_context=True)(anchor)
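# Editor's usage sketch (assumes the surrounding django-sorting app provides the
# middleware that sets request.field, which SortedDataNode.render relies on):
#
#   {% load sorting_tags %}
#   {% autosort object_list %}
#   <th>{% anchor first_name "First name" %}</th>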
| null |
django_sorting/templatetags/sorting_tags.py
|
sorting_tags.py
|
py
| 6,922 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.template.Library",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "django.template.TemplateSyntaxError",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.template.Node",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "django.template",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.template.TemplateSyntaxError",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.template.Node",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "django.template",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.template.Variable",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "django.template.TemplateSyntaxError",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "django.template",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 164,
"usage_type": "call"
}
] |
467751928
|
# -*- coding: utf-8 -*-
import logging
from src.core.util import get_soup
from src.exception import InvalidateBoardError
logger = logging.getLogger(__name__)
class DaumBoard:
def __init__(self, board, date):
self.date = self.target_date(date)
self.board = self.target_board(board)
def get_url(self, page):
host = 'http://media.daum.net/breakingnews/{}'.format(self.board)
query = 'page={}®Date={}'.format(page, self.date)
return '{host}?{query}'.format(host=host, query=query)
def link_generator(self):
for page in range(1, 9999):
url = self.get_url(page)
try:
yield self._scrap_a_page(url)
except InvalidateBoardError:
break
@staticmethod
def _scrap_a_page(url):
result = list()
try:
list_ = get_soup(url).find('ul', class_='list_allnews')
for link in list_.find_all('a', class_='link_txt'):
result.append(link.get('href'))
return result
except:
raise InvalidateBoardError
@staticmethod
def target_board(board):
return board
@staticmethod
def target_date(date):
return str(date.strftime('%Y%m%d'))
class NaverBoard:
def __init__(self, board, date):
self.date = self.target_date(date)
self.board = self.target_board(board)
def get_url(self, page):
host = 'http://news.naver.com/main/list.nhn'
query = 'mode=LSD&mid=shm&listType=title&sid1={}&date={}&page={}' \
.format(self.board, self.date, page)
return '{host}?{query}'.format(host=host, query=query)
def link_generator(self):
last_page = self._get_last_page()
for page in range(1, last_page):
url = self.get_url(page)
yield self._scrap_a_page(url)
def _get_last_page(self):
url = self.get_url(9999)
soup = get_soup(url)
last_page = soup.find('div', class_='paging').find('strong').string
return int(last_page)
@staticmethod
def _scrap_a_page(url):
result = list()
soup = get_soup(url)
list_ = soup.find('div', class_='list_body')
list_.div.decompose()
for link in list_.find_all('a'):
result.append(link.get('href'))
return result
@staticmethod
def target_board(board):
return {
'politics': '100',
'economic': '101',
'society': '102',
'culture': '103',
'foreign': '104',
'digital': '105',
}.get(board)
@staticmethod
def target_date(date):
return str(date.strftime('%Y%m%d'))
class BoardScrap:
def __init__(self, name, board, date):
if name == 'daum':
self.board = DaumBoard(board, date)
pass
elif name == 'naver':
self.board = NaverBoard(board, date)
@property
def link(self):
for link in self.board.link_generator():
yield link
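if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module); it issues live
    # HTTP requests through get_soup, so treat the board name and date as placeholders.
    from datetime import date
    scraper = BoardScrap('naver', 'politics', date.today())
    for page_links in scraper.link:
        print(page_links)
        break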
| null |
src/core/board.py
|
board.py
|
py
| 3,068 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "src.exception.InvalidateBoardError",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "src.core.util.get_soup",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "src.exception.InvalidateBoardError",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "src.core.util.get_soup",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "src.core.util.get_soup",
"line_number": 76,
"usage_type": "call"
}
] |
58251007
|
from functools import partial
import numpy as np
import torch
from padertorch.contrib.je.modules.augment import (
mel_warping, truncexponential_sampling_fn, log_truncnormal_sampling_fn,
log_uniform_sampling_fn, Scale, Mixup, Mask,
)
from padertorch.contrib.je.modules.features import MelTransform
from padertorch.contrib.je.modules.hybrid import CRNN as BaseCRNN
from padertorch.contrib.je.modules.norm import Norm, MulticlassNorm
from torch import nn
from torchvision.utils import make_grid
from upb_audio_tagging_2019.lwlrap import lwlrap_from_precisions
from upb_audio_tagging_2019.lwlrap import positive_class_precisions
class CRNN(BaseCRNN):
def __init__(
self, cnn_2d, cnn_1d, rnn, fcn, *, sample_rate, fft_length, n_mels,
post_rnn_pooling=None, decision_boundary=.5,
max_scale=None,
fmin=50., fmax=None, frequency_warping=True,
n_norm_classes=None,
mixup_prob=0.,
n_time_masks=1, max_masked_time_steps=70, max_masked_time_rate=.2,
n_frequency_masks=1, max_masked_frequency_steps=16, max_masked_frequency_rate=.2,
):
super().__init__(
cnn_2d, cnn_1d, rnn, fcn, post_rnn_pooling=post_rnn_pooling
)
self.decision_boundary = decision_boundary
self.n_norm_classes = n_norm_classes
if max_scale is not None:
self.scale = Scale(
log_uniform_sampling_fn, scale=2*np.abs(np.log(max_scale))
)
else:
self.scale = None
self.mel_transform = MelTransform(
n_mels=n_mels, sample_rate=sample_rate, fft_length=fft_length,
fmin=fmin, fmax=fmax,
warping_fn=mel_warping if frequency_warping else None,
alpha_sampling_fn=partial(
log_truncnormal_sampling_fn, scale=.1, truncation=np.log(1.5)
),
fhi_sampling_fn=partial(
truncexponential_sampling_fn, scale=.5, truncation=5.
)
)
norm_kwargs = dict(
data_format='bcft',
shape=(None, 1, n_mels, None),
statistics_axis='bt',
scale=True,
independent_axis=None,
momentum=None,
interpolation_factor=1.,
)
if n_norm_classes is None:
self.in_norm = Norm(**norm_kwargs)
else:
self.in_norm = MulticlassNorm(
n_classes=n_norm_classes, **norm_kwargs
)
if mixup_prob > 0.:
self.mixup = Mixup(interpolate=False, p=mixup_prob,)
else:
self.mixup = None
if n_time_masks > 0:
self.time_masking = Mask(
axis=-1, n_masks=n_time_masks,
max_masked_steps=max_masked_time_steps,
max_masked_rate=max_masked_time_rate,
)
else:
self.time_masking = None
if n_frequency_masks > 0:
self.freq_masking = Mask(
axis=-2, n_masks=n_frequency_masks,
max_masked_steps=max_masked_frequency_steps,
max_masked_rate=max_masked_frequency_rate,
)
else:
self.freq_masking = None
def forward(self, inputs):
x = inputs['features']
seq_len = inputs['seq_len']
y = inputs['events']
if self.scale is not None:
x = self.scale(x)
x = self.mel_transform(torch.sum(x**2, dim=(-1,))).transpose(-2, -1)
if self.n_norm_classes is None:
x = self.in_norm(x, seq_len=seq_len)
else:
x = self.in_norm(
x, seq_len=seq_len, class_idx=inputs['norm_class_idx']
)
if self.mixup is not None:
x, y = self.mixup(x, y)
y = torch.min(y, torch.ones_like(y))
if self.time_masking is not None:
x = self.time_masking(x, seq_len=seq_len)
if self.freq_masking is not None:
x = self.freq_masking(x)
h, seq_len = self.cnn_2d(x, seq_len)
h, seq_len = self.cnn_1d(h, seq_len)
h = self.rnn(h, seq_len=seq_len)
h, seq_len = self.post_rnn_pooling(h, seq_len)
return nn.Sigmoid()(self.fcn(h)), y, x
def review(self, inputs, outputs):
# compute loss
y_hat, y, x = outputs
if y_hat.dim() == 3: # (B, T, K)
if y.dim() == 2: # (B, K)
y = y.unsqueeze(1).expand(y_hat.shape)
y_hat = y_hat.contiguous().view((-1, y_hat.shape[-1]))
y = y.contiguous().view((-1, y.shape[-1]))
assert y_hat.dim() == y.dim() == 2
bce = nn.BCELoss(reduction='none')(y_hat, y).sum(-1)
# create review including metrics and visualizations
labels, label_ranked_precisions = positive_class_precisions(
y.cpu().data.numpy(),
y_hat.cpu().data.numpy()
)
decision = (y_hat.detach() > self.decision_boundary).float()
true_pos = (decision * y).sum()
false_pos = (decision * (1.-y)).sum()
false_neg = ((1.-decision) * y).sum()
review = dict(
loss=bce.mean(),
scalars=dict(
labels=labels,
label_ranked_precisions=label_ranked_precisions,
true_pos=true_pos.cpu().data.numpy(),
false_pos=false_pos.cpu().data.numpy(),
false_neg=false_neg.cpu().data.numpy()
),
histograms=dict(),
images=dict(
features=x[:3],
)
)
return review
def modify_summary(self, summary):
# compute lwlrap
if 'labels' in summary['scalars']:
labels = summary['scalars'].pop('labels')
label_ranked_precisions = summary['scalars'].pop(
'label_ranked_precisions'
)
summary['scalars']['lwlrap'] = lwlrap_from_precisions(
label_ranked_precisions, labels
)[0]
# compute precision, recall and fscore for each decision boundary
if 'true_pos' in summary['scalars']:
tp = np.sum(summary['scalars'].pop('true_pos'))
fp = np.sum(summary['scalars'].pop('false_pos'))
fn = np.sum(summary['scalars'].pop('false_neg'))
p = tp/(tp+fp)
r = tp/(tp+fn)
summary['scalars'][f'precision'] = p
summary['scalars'][f'recall'] = r
summary['scalars'][f'fscore'] = 2*(p*r)/(p+r)
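            # e.g. tp=8, fp=2, fn=4 gives p=0.8, r=2/3 and fscore = 2*(0.8*2/3)/(0.8+2/3) ≈ 0.727 (editor's note)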
for key, scalar in summary['scalars'].items():
summary['scalars'][key] = np.mean(scalar)
for key, image in summary['images'].items():
if image.dim() == 4 and image.shape[1] > 1:
image = image[:, 0]
if image.dim() == 3:
image = image.unsqueeze(1)
summary['images'][key] = make_grid(
image.flip(2), normalize=True, scale_each=False, nrow=1
)
return summary
input_size_key = 'n_mels'
| null |
padertorch/contrib/je/models/tagging.py
|
tagging.py
|
py
| 7,057 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "padertorch.contrib.je.modules.hybrid.CRNN",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "padertorch.contrib.je.modules.augment.Scale",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.log_uniform_sampling_fn",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "numpy.abs",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.features.MelTransform",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.mel_warping",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.log_truncnormal_sampling_fn",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "numpy.log",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.truncexponential_sampling_fn",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "padertorch.contrib.je.modules.norm.Norm",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.norm.MulticlassNorm",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.Mixup",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.Mask",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "padertorch.contrib.je.modules.augment.Mask",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "torch.nn.BCELoss",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "upb_audio_tagging_2019.lwlrap.positive_class_precisions",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "upb_audio_tagging_2019.lwlrap.lwlrap_from_precisions",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 183,
"usage_type": "call"
}
] |
499994573
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# import plot_matrix
import cv2
#
# def add_text(dists):
# time = 0
# for dist in dists:
# time = time + 15
# text = dist, 'km in', time,'mins'
# text(0, (.75 *dist), text, fontsize=12)
def plot (matrix, dists):
time = 0
matrix = np.array(matrix)
fig = plt.figure(figsize=(6, 3.2))
for dist in dists:
time = time + 15
text = round(dist,0), time
# fig.text(.5, (.015 *dist), text, fontsize=6)
fig.text(0.5, .475 +(.015 *dist), text,fontsize=8, horizontalalignment='center', verticalalignment='center')
plt.axis('off')
#ax = fig.add_subplot(111)
#ax.set_title('colorMap')
plt.imshow(matrix)
fig.savefig("mapped_area.png",transparent = True, bbox_inches = 'tight', pad_inches = 0)
# ax.set_aspect('equal')
#
# cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
# cax.get_xaxis().set_visible(False)
# cax.get_yaxis().set_visible(False)
# cax.patch.set_alpha(0)
# cax.set_frame_on(False)
# plt.colorbar(orientation='vertical')
#plt.show()
def replace_img (l_img,s_img):
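    # Alpha-composites the RGBA overlay s_img onto the top-left corner of l_img:
    # each colour channel is blended as alpha_s * overlay + (1 - alpha_s) * background.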
y_offset = 0
x_offset = 0
y1, y2 = y_offset, y_offset + s_img.shape[0]
x1, x2 = x_offset, x_offset + s_img.shape[1]
alpha_s = s_img[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
l_img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
alpha_l * l_img[y1:y2, x1:x2, c])
return l_img
def edit_image (map_img):
img = cv2.imread("/Users/2020shatgiskessell/Desktop/Missing_Child_Recognition/mapped_area.png",-1)
h,w,c = map_img.shape
#print (str(h) + str(" , ") + str(w))
# img = cv2.resize(img,(h,w))
#overlayed = cv2.addWeighted(map_img,0.4,img,0.1,0)
overlayed = replace_img (map_img,img)
cv2.imshow("overlayed", overlayed)
cv2.waitKey(0)
#map = cv2.imread("/Users/2020shatgiskessell/Desktop/Missing_Child_Recognition/map.png",-1)
# img, dists = plot_matrix.run(20000,50)
# plot (img, dists)
#edit_image (map)
| null |
plot_data.py
|
plot_data.py
|
py
| 2,122 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 68,
"usage_type": "call"
}
] |
243074859
|
import streamlit as st
import streamlit.components.v1 as components
from PIL import Image
def principal():
lgit = """<a href='https://github.com/CartagenaMinas/VentiST' target="_blank">Github</a>"""
st.title('CÁLCULO DEL CAUDAL DE AIRE MINA')
st.write("")
st.write("")
col1, col2 = st.columns([2, 2])
#col1.write('Bienvenidos en esta app donde se podra calcular el requerimiento de aire necesario para Minería Subterránea.')
#col1.write(f'\n Si quieres quieres conocer el codigo de esta app escrito en python puedes visitarlo en el siguiente enlace de {lgit}.', unsafe_allow_html=True)
#col1.write("Tambien puedes visitar nuestra pagina web IDL Mining donde subimos post sobre programacion orientado a la ingeniera de minas y nos dedicamos a la creacion de Modelos de Deep Learning.")
col1.markdown("<div style='text-align: justify'>Esta app está enfocada a ser de guía básica de cómo utilizar Deep Learning para clasificar si una persona lleva mascarilla o no lleva mascarilla.</div>", unsafe_allow_html=True)
col1.markdown("<div style='text-align: justify'>Tambien puedes visitar nuestra pagina web IDL Mining donde subimos post sobre programacion orientado a la ingeniera de minas y nos dedicamos a la creacion de Modelos de Deep Learning.</div>", unsafe_allow_html=True)
image = Image.open('imagenes/j11.png')
col2.write("")
col2.image(image, caption='Ventilación en minería subterránea')
st.write('### CREATED BY CRISTIAN CARTAGENA MATOS')
components.html(
"""
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<a style="color:black; font-size:110% ;" href="https://www.linkedin.com/in/cristiancartagenamatos/" target="_blank"><i class="fa fa-linkedin-square"></i>Linkedin</a>
<a style="color:black; font-size:110% ;" href="https://github.com/CartagenaMinas" target="_blank"><i class="fa fa-github"></i>Github</a>
<a style="color:black; font-size:110% ;" href="http://www.idlmining.com/" target="_blank"><i class="fa fa-rocket"></i>IDL Mining</a>
""" , height=600)
| null |
menu.py
|
menu.py
|
py
| 2,161 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "streamlit.title",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "streamlit.write",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.components.v1.html",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "streamlit.components.v1",
"line_number": 21,
"usage_type": "name"
}
] |
287406169
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/6/23 04:24'
import requests
import json
# A quick test of Python compatibility
access_token = "11_4LXEASFtFR4AUJCNWyEcQ2-7cl3qgFmMnc4RU3ZjSWyOjGSj7i0b1Eksj37QF48S4zR12WSeCJWomA7r49z12EhP15_1ZPo2-kUqttZmbwr-zWXHK5s6wdGlFRbshDqTa70z1nKfqAFPpDvFLREiAHAPVI"
url = 'https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token={}'.format(access_token)
content = u'''Python2欢迎关注活动家——国内会议查询报名第一平台\n\n一键查询近期热门会议信息,试试回复:区块链\n\n马上开启大咖之旅,从<a href="https://mp.weixin.qq.com/mp/profile_ext?action=home&__biz=MzA4NjEzNTYxMw==&scene=124#wechat_redirect">点击这里开始</a>'''
# content = u'<a href="http://www.qq.com">点击跳小程序</a>'
data = {
"touser": "oPbHyt4UFfQdOANBjVfjmGWaJZOo", # user openid
"msgtype": "text",
"text": {
"content": content,
}
}
# These two processing steps are critical; otherwise WeChat replies with literal \uXXXX escapes
# Note the difference from Python 3
data = json.dumps(data, ensure_ascii=False) # unicode
print(type(data))
# data2 = json.dumps(data) # str
# print(type(data2))
data = data.encode('utf-8') # str
ret = requests.post(url, data=data)
print(ret.text)
| null |
dev_draft/wechat_api_test/custom_reply_request.py
|
custom_reply_request.py
|
py
| 1,272 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 37,
"usage_type": "call"
}
] |
383178204
|
from urllib.request import urlopen
from shutil import copyfileobj, copyfile
from tempfile import NamedTemporaryFile
import os
def fetch_file(url, outfile):
try:
with urlopen(url) as fsrc, NamedTemporaryFile(delete=False) as fdst:
copyfileobj(fsrc, fdst)
print(fdst.name)
copyfile(fdst.name, outfile)
os.remove(fdst.name)
except Exception as err:
raise os.error from err
else:
return True
| null |
moduler/download_file.py
|
download_file.py
|
py
| 456 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "shutil.copyfileobj",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "shutil.copyfile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.error",
"line_number": 15,
"usage_type": "attribute"
}
] |
637704243
|
#
import matplotlib.pyplot as plt
from datetime import datetime, timedelta, time
def str2datetime(this_str):
year = int(this_str[:4])
month = int(this_str[4:6])
day = int(this_str[6:8])
hour = int(this_str[9:11])
minute = int(this_str[11:13])
second = int(this_str[13:15])
return datetime(year, month, day, hour, minute, second)
with open('30min_prices.matrix') as f:
line1 = next(f).split(',')[1:]
timeline = []
for t in line1:
timeline.append(str2datetime(t))
s1 = next(f).split(',')[1:]
d1 = [float(d) for d in s1]
plt.plot(timeline, d1)
plt.show()
| null |
stat_arbi/tmp.py
|
tmp.py
|
py
| 572 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
}
] |
191678748
|
from pathlib import Path
import pytest
import numpy as np
import pandas as pd
import rpy2.robjects as ro
from rpy2.robjects.conversion import localconverter
from rpy2.robjects import pandas2ri
from multiplier import MultiplierProjection
def read_rds(test_case_number: int, kind: str):
"""Reads a test case data from an RDS file.
Args:
test_case_number (int): test case number to be read.
kind (str): kind of data; it could be 'input_data' or 'output_data'.
"""
readRDS = ro.r["readRDS"]
rds_file = (
Path(__file__).resolve().parent
/ "data"
/ "multiplier"
/ f"test_case{test_case_number}/{kind}.rds"
)
df = readRDS(str(rds_file))
with localconverter(ro.default_converter + pandas2ri.converter):
d = ro.conversion.rpy2py(df)
return pd.DataFrame(data=d, index=df.rownames, columns=df.colnames)
def run_saved_test_case_simple_check(test_case_number, test_function=np.allclose):
# prepare
np.random.seed(0)
input_data = read_rds(test_case_number, "input_data")
# run
mproj = MultiplierProjection()
proj_data = mproj.transform(input_data)
# evaluate
assert proj_data is not None
assert proj_data.shape == (987, input_data.shape[1])
assert isinstance(proj_data, pd.DataFrame)
expected_output_data = read_rds(test_case_number, "output_data")
assert expected_output_data.shape == proj_data.shape
assert test_function(expected_output_data.values, proj_data.values)
@pytest.mark.parametrize(
"test_case_number",
# these three cases include simple and small dataset with just a few genes and
# traits (columns)
[1, 2, 3],
)
def test_project_simple_data(test_case_number):
run_saved_test_case_simple_check(test_case_number)
@pytest.mark.parametrize("test_case_number", [4])
def test_project_data_with_nan(test_case_number):
run_saved_test_case_simple_check(
test_case_number, lambda x, y: np.allclose(x, y, equal_nan=True)
)
@pytest.mark.parametrize("test_case_number", [5])
def test_project_phenomexcan_subsample(test_case_number):
run_saved_test_case_simple_check(test_case_number)
| null |
tests/test_multiplier.py
|
test_multiplier.py
|
py
| 2,179 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rpy2.robjects.r",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "rpy2.robjects",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rpy2.robjects.conversion.localconverter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rpy2.robjects.default_converter",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "rpy2.robjects",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rpy2.robjects.pandas2ri.converter",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "rpy2.robjects.pandas2ri",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rpy2.robjects.conversion.rpy2py",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rpy2.robjects.conversion",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "rpy2.robjects",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "multiplier.MultiplierProjection",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 72,
"usage_type": "attribute"
}
] |
215727216
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.shortcuts import render, render_to_response
from datetime import datetime
from base_api.full_views.helper import get_request_param_as_string
from base_api.models import *
from base_api.form import *
from django.http import *
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ObjectDoesNotExist
def full_add_edit_company(request):
if not request.user.is_active:
return HttpResponseRedirect('/login/')
out = {}
user_role = Roles.objects.get(id=request.user.id).role
if user_role == 2 or user_role == 1 or user_role == 3:
return HttpResponseRedirect('/oops/')
else:
out.update({'user_role': user_role})
get_params = '?'
get_params += get_request_param_as_string(request)
if request.method == 'POST':
form = CompanyForm(request.POST)
if 'pk' in request.POST:
id_company = request.POST['pk']
title = request.POST['title']
last_name = request.POST['last_name']
name = request.POST['name']
patronymic = request.POST['patronymic']
new_company = Companies(id=id_company, title=title, last_name=last_name, name=name, patronymic=patronymic)
new_company.save(force_update=True)
return HttpResponseRedirect('/companies/' + get_params)
else:
if form.is_valid():
title = form.cleaned_data['title']
last_name = form.cleaned_data['last_name']
name = form.cleaned_data['name']
patronymic = form.cleaned_data['patronymic']
if Companies.objects.filter(title=title, is_deleted=0).count() == 0:
new_company = Companies.objects.create(title=title, last_name=last_name, name=name,
patronymic=patronymic)
return HttpResponseRedirect('/companies/' + get_params)
else:
out.update({"error": 1})
out.update({'page_title': "Добавление компании"})
else:
out.update({'page_title': "Добавление компании"})
else:
if 'id' in request.GET:
id_company = request.GET['id']
out.update({"error": 0})
company = Companies.objects.get(pk=id_company)
form = CompanyForm({'title': company.title, 'last_name': company.last_name,
'name': company.name, 'patronymic': company.patronymic})
out.update({'page_title': "Редактирование компании"})
else:
form = CompanyForm()
out.update({'page_title': "Добавление компании"})
out.update({'company_form': form})
return render(request, 'company/add_edit_company.html', out)
def full_delete_company(request):
if not request.user.is_active:
return HttpResponseRedirect('/login/')
user_role = Roles.objects.get(id=request.user.id).role
if user_role == 2 or user_role == 1 or user_role == 3:
return HttpResponseRedirect('/oops/')
id = request.GET['id']
company = Companies.objects.get(pk=id)
company.is_deleted = 1
company.save(update_fields=["is_deleted"])
get_params = '?'
if 'page' in request.GET:
page = int(request.GET['page'])
get_params += 'page=' + str(page) + '&'
if 'length' in request.GET:
length = int(request.GET['length'])
get_params += 'length=' + str(length) + '&'
if 'sort' in request.GET:
sort = int(request.GET['sort'])
get_params += 'sort=' + str(sort) + '&'
return HttpResponseRedirect('/companies/' + get_params)
def full_get_companies(request):
if not request.user.is_active:
return HttpResponseRedirect('/login/')
out = {}
if 'page' in request.GET and 'length' in request.GET:
page = int(request.GET['page'])
length = int(request.GET['length'])
start = (page - 1) * length
out.update({'start': start})
user_role = Roles.objects.get(id=request.user.id).role
if user_role == 2 or user_role == 1 or user_role == 3:
return HttpResponseRedirect('/oops/')
else:
out.update({'user_role': user_role})
companies = Companies.objects.filter(is_deleted=0)
for c in companies:
c.full_name = c.last_name + ' ' + c.name + ' ' + c.patronymic
out.update({'page_title': "Компании"})
out.update({'companies': companies})
out.update({'count': companies.count()})
return render(request, 'company/get_companies.html', out)
| null |
base_api/full_views/company_views.py
|
company_views.py
|
py
| 4,721 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "base_api.full_views.helper.get_request_param_as_string",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 108,
"usage_type": "call"
}
] |
250564316
|
from solver.SymbolPool import *
from solver.Tensor import *
from solver.LoopStacker import *
import pdb
import math
from amplpy import AMPL
import amplpy
import time
from solver.SolutionWarp import *
from joblib import Parallel, delayed
class ModfileBuilder:
def __init__(self, sympool, idx_list, level_list, bw_list, parallel_list, parallelism, pbsize_dict, capacity_list, fplist, costlist, parallel_ids):
self.sympool = sympool
self.cons_id = 0
self.idx_list = idx_list
self.level_list = level_list
self.parallel_list = parallel_list # parallel level eg:[3]
self.parallelism = parallelism # parallelism map to parallel level
self.bw_list = bw_list
self.pbsize_dict = pbsize_dict
self.lratio = 1.00001
self.capacity_list = capacity_list
self.fpexpr_list = fplist
self.costexpr_list = costlist
self.parallel_ids = parallel_ids # parallable indices eg: [x,y,f]
def buildVar(self, name, lb=None, ub=None, isInt=False):
res = 'var ' + name
if lb:
res += ' >= '+ str(lb)
if ub:
res += ' <=' + str(ub)
if isInt:
res += ' integer'
res+=';\n'
return res
def buildLeCons(self, small, large):
self.cons_id += 1
return 'subject to c' + str(self.cons_id)+': ' + str(small) + ' <= ' + str(large) + ';\n'
def buildEqCons(self, lhs, rhs):
self.cons_id += 1
return 'subject to c' + str(self.cons_id)+': ' + str(lhs) + ' == ' + str(rhs) + ';\n'
def buildVars(self ):
res=''
res+='# declare memory lv\n'
for idx in self.idx_list:
lv = self.level_list[-1]+1
name = str(self.sympool.get_sym(idx=idx, tlv=lv))
res += self.buildVar(name, lb=self.pbsize_dict.get(idx), ub=self.pbsize_dict.get(idx))
res+='# declare cache lvs\n'
for lv in self.level_list:
for idx in self.idx_list:
name = str(self.sympool.get_sym(idx=idx, tlv=lv))
res += self.buildVar(name, lb=1, ub=self.pbsize_dict.get(idx))
res+='# declare parallel lvs\n'
for lv in self.parallel_list:
for idx in self.idx_list:
name = str(self.sympool.get_sym(idx=idx, tlv=lv))
res += self.buildVar(name, lb=1, ub=self.pbsize_dict.get(idx))
res +='# declare cost and footprints\n'
for lv in self.level_list:
res += self.buildVar('cost'+str(lv))
res += self.buildVar('fp'+str(lv))
return res
def buildTrivalCons(self):
res=''
res+='# trival cons cache lvs\n'
for lv in self.level_list:
for idx in self.idx_list:
lhs = self.sympool.get_sym(idx=idx, tlv=lv)
rhs= self.sympool.get_sym(idx=idx, tlv=lv+1)
res += self.buildLeCons(lhs, rhs)
res+='# trival cons parallel lvs\n'
for lv in self.parallel_list:
for idx in self.idx_list:
lhs = self.sympool.get_sym(idx=idx, tlv=lv)
rhs= self.sympool.get_sym(idx=idx, tlv=lv+1)
res += self.buildLeCons(lhs, rhs)
return res
def buildFpCons(self):
res = ''
for lv , capacity, fpexpr in zip(self.level_list, self.capacity_list, self.fpexpr_list):
name = 'fp'+str(lv)
res += self.buildEqCons(lhs=name, rhs=fpexpr)
res += self.buildLeCons(small=name, large=capacity)
return res
def buildCostDefs(self):
res = ''
for lv, costexpr in zip(self.level_list, self.costexpr_list):
name = 'cost'+str(lv)
res += self.buildEqCons(lhs=name, rhs=costexpr)
return res
def buildInvariants(self):
decls = self.buildVars()
frozen_cons = self.buildTrivalCons()+ self.buildFpCons() + self.buildCostDefs()
frozen_cons += self.buildParallelCons()
return decls, frozen_cons
def buildVariants(self, target_lv, effective_list):
return self.buildTarget(target_lv) + self.buildCostCons(target_lv, effective_list)
def buildTarget(self, target_lv):
return 'minimize maxcost: cost' + str(target_lv) + ';\n'
def buildCostCons(self, target_lv, effective_list):
res = ''
for elv in effective_list:
if elv != target_lv:
res += self.buildLeCons(small=str(self.lratio) +' * cost'+str(elv),
large= 'cost'+str(target_lv))
return res
# This is the original parallel dispatch: x, y, and f have equal weight
####################################################################
# def buildParallelCons(self): #
# assert(len(self.parallel_list)<=1) #
# res = '' #
# for lv, factor in zip(self.parallel_list, self.parallelism): #
# parallel_expr = 1 #
# for idx in self.parallel_ids: #
# Tx = self.sympool.get_sym(idx=idx, tlv=lv) #
# Nx = self.sympool.get_sym(idx=idx, tlv=lv+1) #
# parallel_expr *= Nx/Tx #
# res += self.buildEqCons(lhs=parallel_expr, rhs=factor) #
# return res #
####################################################################
# This is a priority-f parallel dispatch: it tries to parallelize f before the other parallel indices
def buildParallelCons(self):
assert(len(self.parallel_list)<=1)
parallel_f = False
res = ''
factor = self.parallelism[0]
if 'f' in self.parallel_ids:
parallel_f = True
pbsz_f = self.pbsize_dict['f']
maxf_fork = math.ceil(pbsz_f/16)
maxf_fork = factor/ math.ceil(factor/maxf_fork)
plv = self.parallel_list[0]
Tf = self.sympool.get_sym(idx='f', tlv=plv)
Nf = self.sympool.get_sym(idx='f', tlv=plv+1)
paraf_expr = Nf/Tf
res += self.buildEqCons(lhs=paraf_expr, rhs=maxf_fork)
factor /=maxf_fork
for lv in self.parallel_list:
parallel_expr = 1
for idx in self.parallel_ids:
if idx == 'f':
continue
Tx = self.sympool.get_sym(idx=idx, tlv=lv)
Nx = self.sympool.get_sym(idx=idx, tlv=lv+1)
parallel_expr *= Nx/Tx
res += self.buildEqCons(lhs=parallel_expr, rhs=factor)
return res
class ModGen:
def __init__(self, glb_range, tensors, idx_list, fp_coeffs, cost_coeffs, level_list, bw_list, capacity_list, parallel_list, parallelism, parallel_ids, numAB=[6,16]):
self.pbsize_dict=glb_range
self.pool = TileSymbolPool()
self.tensors = tensors
self.idx_list = idx_list
self.fp_coeffs = fp_coeffs
self.cost_coeffs = cost_coeffs
self.level_list = level_list
self.bw_list = bw_list
self.parallel_list = parallel_list
self.capacity_list = capacity_list
self.parallelism = parallelism # parallelism map to parallel level
self.parallel_ids = parallel_ids # parallable indices eg: [x,y,f]
self.level_fp_list = []
self.level_pmucost_dict = {}
self.all_nest_cost={}
self.numAB = numAB
# erase criteria:
# w and h are symmetric
# w and h are degenerate (guaranteed by ukr)
# x never iterates before y (guaranteed by fused idx in ukr)
self.erase_pmu=[('w', 'c', 'h', 'b', 'f', 'x', 'y'),
('f', 'w', 'b', 'x', 'y', 'c', 'h'),
('f', 'h', 'b', 'x', 'y', 'c', 'w'),
('f', 'x', 'b', 'y', 'c', 'w', 'h'),
('x', 'b', 'y', 'f', 'c', 'w', 'h')]
self.lv0_pmu=[('h', 'c', 'w', 'b', 'f', 'x', 'y')]
def build_repeatance(self, lv):
repeatance = 1
for idx in self.idx_list:
Nx = self.pool.get_sym(idx=idx, tlv=self.level_list[-1]+1)
Tx = self.pool.get_sym(idx=idx, tlv=lv+1)
repeatance *= Nx/Tx
if len(self.parallel_list)>0 and len(self.parallelism) == len(self.parallel_list) and lv<self.parallel_list[0]:
repeatance /= self.parallelism[0]
return repeatance
def one_lv_cost_fp(self, cache_lv):
loop_pruner = LoopPruner(tensors=self.tensors,
idx_list=self.idx_list,
cache_lv = cache_lv,
fp_coeffs=self.fp_coeffs,
cost_coeffs=self.cost_coeffs)
if cache_lv>0:
return cache_lv, loop_pruner.prune_cost(sym_pool=self.pool, erase_pmu=self.erase_pmu)
else:
return cache_lv, loop_pruner.prune_cost(sym_pool=self.pool, erase_pmu=self.erase_pmu, keep_pmu=self.lv0_pmu)
def build_all_lv_cost_fp(self):
level_pmucost_list = Parallel(n_jobs=-1)(delayed(self.one_lv_cost_fp)(cache_lv ) for cache_lv in self.level_list)
for elem in level_pmucost_list:
self.level_pmucost_dict[elem[0]] = elem[1]
for cache_lv in self.level_list:
print('cache lv ', cache_lv, ' volume: ', self.level_pmucost_dict[cache_lv])
tot_fp = 0
fp_builder = FootprintBuilder()
for ts in self.tensors:
tot_fp += fp_builder.buildFP(tensor=ts, cache_lv=cache_lv, sym_pool=self.pool, fp_coeff=self.fp_coeffs.get(ts.name))
self.level_fp_list.append(tot_fp)
def nextnest(self, cur_lv, partial_nest_cost):
lv_list=self.level_list
if cur_lv >=len(lv_list):
nest=tuple(partial_nest_cost[0])
cost=list(partial_nest_cost[1])
self.all_nest_cost[nest]=cost
return
cache_lv=lv_list[cur_lv]
for pmu in self.level_pmucost_dict[cache_lv].keys():
partial_nest_cost[0].append(pmu)
partial_nest_cost[1].append(self.level_pmucost_dict[cache_lv].get(pmu)[1])
self.nextnest(cur_lv+1, partial_nest_cost)
partial_nest_cost[0].pop()
partial_nest_cost[1].pop()
def create_nest_cost_map(self):
self.nextnest(cur_lv=0, partial_nest_cost=[[],[]] )
print('num of nest candidates: ', len(self.all_nest_cost))
# final_candidate_list =[self.fix_create_run(nest_tuple=k, cost_list=self.all_nest_cost.get(k)) for k in self.all_nest_cost.keys()]
final_candidate_list = Parallel(n_jobs=-1)(delayed(self.fix_create_run)(nest_tuple=k, cost_list=self.all_nest_cost.get(k)) for k in self.all_nest_cost.keys())
copy_candidate_list = []
for cand in final_candidate_list:
if not isinstance(cand, str):
copy_candidate_list.append(cand)
comp_score = lambda arg_list: arg_list[0]
assert(len(copy_candidate_list)>0)
copy_candidate_list.sort(key=comp_score)
print ('best candidate: ', copy_candidate_list[0])
# return candidate format: cost_score, bottleneck_list, fix(tile)_dict, nest_tuple
assert len(copy_candidate_list) > 0, 'Solution not found!!!!'
return copy_candidate_list[0:5]
def nest_to_string(self, nest_key):
retstr = ''
for pmu in nest_key:
for i in pmu:
retstr+=i
retstr+='_'
return retstr
def create_modfile_name(self, nest_tuple, target_lv, effective_lv):
res = 'mods/nest'
for pmu in nest_tuple:
for idx in pmu:
res+=idx
res+='-'
res += 'target' + str(target_lv)
res += 'effect'
for i in effective_lv:
res += str(i)
res +='.mod'
return res
def create_modfile(self, nest_tuple, cost_list, target_lv, effective_lv):
assert (target_lv in effective_lv)
cost_expr_list = []
for tlv, cost_dict, bw in zip(self.level_list, cost_list, self.bw_list):
sum_cost = 0
for ts_cost in cost_dict.values():
sum_cost += ts_cost
sum_cost *= self.build_repeatance(tlv)
sum_cost /= bw
cost_expr_list.append(sum_cost)
modfile_builder = ModfileBuilder(self.pool, idx_list=self.idx_list,
level_list=self.level_list, bw_list=self.bw_list,
parallel_list=self.parallel_list, parallelism=self.parallelism,
parallel_ids=self.parallel_ids,
pbsize_dict = self.pbsize_dict, capacity_list=self.capacity_list,
costlist=cost_expr_list, fplist=self.level_fp_list,)
decls, invars = modfile_builder.buildInvariants()
varies = modfile_builder.buildVariants(target_lv=target_lv, effective_list=effective_lv)
# print(decls)
# print(varies)
# print(invars)
mod_file_content = decls + varies + invars
mod_file_name = self.create_modfile_name(nest_tuple, target_lv, effective_lv)
modfile = open(mod_file_name, "w")
modfile.write(mod_file_content)
modfile.close()
return mod_file_name, cost_expr_list
def fix_create_run(self, nest_tuple, cost_list):
effective_lv = list(self.level_list)
fix_dict = {'L0Tb':1, 'L0Tx':1, 'L0Ty':self.numAB[0], 'L0Tf':self.numAB[1], 'L0Tc':1, 'L0Tw':1, 'L0Th':1, 'L1Tw':self.pbsize_dict['w'], 'L1Th':self.pbsize_dict['h'] }
cost_score = []
bottleneck_list = []
all_tiles = {}
while len(effective_lv)>0:
best_target_cost = [-1, math.inf, {}] # best lv, best lv cost, tile names fixed by best lv
for target_lv in effective_lv:
try:
ampl = AMPL()
ampl.setOption('solver', 'ipopt')
modfile_name, cost_expr_list = self.create_modfile(nest_tuple, cost_list, target_lv, effective_lv)
ampl.read(modfile_name)
fixed_vars = []
for fixtile_name in fix_dict.keys():
print(fixtile_name)
fixvar = ampl.getVariable(name=fixtile_name)
fixvar.fix(value=fix_dict.get(fixtile_name))
fixed_vars.append(fixvar)
ampl.solve()
maxcost = ampl.getObjective('maxcost')
tofix_tiles = {}
target_expr = cost_expr_list[self.level_list.index(target_lv)]
for tofix_t in target_expr.free_symbols:
fixvar = ampl.getVariable(str(tofix_t))
tofix_tiles[str(tofix_t)] = math.floor(fixvar.value())
if maxcost.value() < best_target_cost[1] and maxcost.exitcode()==0 and 'Optimal Solution Found' in maxcost.message():
best_target_cost[0] = target_lv
best_target_cost[1] = maxcost.value()
best_target_cost[2] = dict(tofix_tiles)
if len(effective_lv) == 1:
for lv in list(self.level_list) + list(self.parallel_list):
for idx in self.idx_list:
Tx_name=str(self.pool.get_sym(idx=idx, tlv=lv))
Tx_var = ampl.getVariable(name=Tx_name)
Tx_value = math.floor(Tx_var.value())
all_tiles[Tx_name] = Tx_value
ampl.close()
except Exception as e:
print(e)
raise
if (best_target_cost[0] <0):
return 'invalid'
delete_lv = best_target_cost[0]
bottle_cost = best_target_cost[1]
print('lv', delete_lv, 'is bottleneck ', bottle_cost )
tofix_tile_dict = best_target_cost[2]
cost_score.append(best_target_cost[1])
bottleneck_list.append(best_target_cost[0])
popidx = effective_lv.index(delete_lv)
effective_lv.pop(popidx)
for tofix in tofix_tile_dict.keys():
if fix_dict.get(tofix) is None:
fix_dict[tofix] = tofix_tile_dict.get(tofix)
for fkey in fix_dict.keys():
pblv = 'L' + str(self.level_list[-1]+1)
if pblv in fkey:
continue
fv = fix_dict.get(fkey)
av = all_tiles.get(fkey)
if(fv != av):
print('fkey fv av:' ,fkey, fv, av)
assert (av == fv )
return cost_score, bottleneck_list, all_tiles, nest_tuple
def warp_sol(self, solution, fuse_tile, erase_tile, special_scale):
sol_warp = SolutionWarp(self.idx_list, self.level_list, solution, self.parallel_list, self.parallelism, self.parallel_ids,self.pbsize_dict, special_scale, numAB=self.numAB)
return sol_warp.fuse_tiles(fuse_tile, erase_tile)
| null |
TileLoopGenerator/solver/ModGen.py
|
ModGen.py
|
py
| 17,950 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "math.ceil",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "math.inf",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "amplpy.AMPL",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 379,
"usage_type": "call"
}
] |
221459793
|
#!/usr/bin/env python3
import numpy as np
from scipy.io import loadmat
def load_data(filename, verbose=True):
# Loading the data from the E-Coli Matrix and then extracting data for their respective types.
print(f"Loading dataset: {filename}...")
data = loadmat(filename)
x_train_np = data['xTrain']
y_train_np = data['yTrain']
x_test_np = data['xTest']
y_test_np = data['yTest']
if verbose:
print("\nSome information about the given data:")
print(x_train_np.shape[0],"total training samples having",x_train_np.shape[1],"features each")
print(x_test_np.shape[0],"total testing samples having",x_test_np.shape[1],"features each")
# Converting the numpy arrays in raw python dictionaries for training data
x_train_list = [list(x) for x in x_train_np]
x_train = [{'feature_' + str(n+1):v for n,v in enumerate(x)} for x in x_train_list ]
y_train = ['class_' + str(el) for el in y_train_np.reshape(-1)]
# Converting the numpy arrays in raw python dictionaries for testing data
x_test_list = [list(x) for x in x_test_np]
x_test = [{'feature_' + str(n+1):v for n,v in enumerate(x)} for x in x_test_list ]
y_test = ['class_' + str(el) for el in y_test_np.reshape(-1)]
return x_train, y_train, x_test, y_test
def assess_accuracy(filename, pred):
print(f"Loading dataset: {filename}...")
data = loadmat(filename)
x_test_np = data['xTest']
y_test_np = data['yTest']
nb = np.array(pred)
_, cor_count = np.unique(y_test_np.reshape(-1) == nb, return_counts=True)
pred_inc, pred_cor = tuple(cor_count)
print ('Accuracy:', pred_cor/(pred_cor+pred_inc))
print("\n")
for label in range(1, 6):
_, pred_count = np.unique(nb == label, return_counts=True)
pred_neg, pred_pos = tuple(pred_count)
truth = y_test_np.reshape(-1) == label
_, cond_count = np.unique(y_test_np.reshape(-1) == label, return_counts=True)
cond_neg, cond_pos = tuple(cond_count)
true_pos = np.unique(np.logical_and(nb == label, y_test_np.reshape(-1) == label), return_counts=True)[1][1]
print ("Class", label)
print ('Prediction positive:', pred_pos)
print ('Condition positive:', cond_pos)
print ('True positive:', true_pos)
print ('Precision:', true_pos/pred_pos)
print ('Recall:', true_pos/cond_pos)
print ('\n')
# eof
| null |
Mid 1/Problem3_Naive Bayes/problem_utils.py
|
problem_utils.py
|
py
| 2,417 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scipy.io.loadmat",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.logical_and",
"line_number": 50,
"usage_type": "call"
}
] |
304022528
|
import networkx as nx
import random
import numpy as np
import walkers.healthState
#import Walker
#import Virus
import structures.locations as l
import matplotlib.pyplot as plt
class City():
def __init__(self, nLocations):
self.pHome=0.24
self.otherP= (1 - self.pHome) / 4.0
degreeSeq=[]
seqSum=0
dict = {} # dict that associates node IDs to types of location (constants); used for labeling in drawing
ObjDict = {} # dict that associates node IDs to instances of the location classes
for i in range(0,nLocations-1):
temp = random.randint(4,9)
degreeSeq.append(temp)
seqSum += temp
dict[i] = np.random.choice([l.HOME, l.WORKPLACE, l.GROCERIES_STORE, l.SCHOOL, l.LEISURE], 1,p=[self.pHome, self.otherP, self.otherP, self.otherP, self.otherP])[0]
if dict[i]==l.HOME:
ObjDict[i]=l.buildDefaultHome()
elif dict[i]==l.WORKPLACE:
ObjDict[i] = l.buildDefaultWorkplace()
elif dict[i]==l.GROCERIES_STORE:
ObjDict[i] = l.buildDefaultStore()
elif dict[i]==l.SCHOOL:
ObjDict[i] = l.buildDefaultSchool()
else:
ObjDict[i] = l.buildDefaultLeisure()
temp = 0
while (seqSum + temp)%2 !=0:
temp = random.randint(4, 9)
degreeSeq.append(temp)
seqSum += temp
dict[nLocations-1] = np.random.choice([l.HOME, l.WORKPLACE, l.GROCERIES_STORE, l.SCHOOL, l.LEISURE], 1,p=[self.pHome, self.otherP, self.otherP, self.otherP, self.otherP])[0]
if dict[nLocations-1] == l.HOME:
ObjDict[nLocations-1] = l.buildDefaultHome()
elif dict[nLocations-1] == l.WORKPLACE:
ObjDict[nLocations-1] = l.buildDefaultWorkplace()
elif dict[nLocations-1] == l.GROCERIES_STORE:
ObjDict[nLocations-1] = l.buildDefaultStore()
elif dict[nLocations-1] == l.SCHOOL:
ObjDict[nLocations-1] = l.buildDefaultSchool()
else:
ObjDict[nLocations-1] = l.buildDefaultLeisure()
self.G = nx.configuration_model(degreeSeq)
nx.set_node_attributes(self.G,ObjDict,"LocType")
print(nx.get_node_attributes(self.G,"LocType"))
self.G.nodes()
nx.draw(self.G,nx.random_layout(self.G),labels=dict, with_labels=True)
plt.show()
c = City(10)
| null |
City.py
|
City.py
|
py
| 2,461 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.randint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.HOME",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "structures.locations.WORKPLACE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.GROCERIES_STORE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.SCHOOL",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.LEISURE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.HOME",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultHome",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "structures.locations.WORKPLACE",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultWorkplace",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "structures.locations.GROCERIES_STORE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultStore",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "structures.locations.SCHOOL",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultSchool",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultLeisure",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.HOME",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "structures.locations.WORKPLACE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.GROCERIES_STORE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.SCHOOL",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.LEISURE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "structures.locations.HOME",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultHome",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "structures.locations.WORKPLACE",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultWorkplace",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "structures.locations.GROCERIES_STORE",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultStore",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "structures.locations.SCHOOL",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "structures.locations",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultSchool",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "structures.locations.buildDefaultLeisure",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "structures.locations",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "networkx.configuration_model",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "networkx.set_node_attributes",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "networkx.random_layout",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
}
] |
253821342
|
# -*- coding: utf-8 -*-
"""
The ``bot`` module
======================
Use it to init a Tock BOT.
:Example:
>>> import os
>>> from tock.bot import TockBot
>>> TockBot().namespace("my-bot").start_websocket(apikey=os.environ['TOCK_APIKEY'])
"""
import asyncio
import logging
from datetime import datetime
from typing import Callable, Type, List
from tock.bus import TockBotBus, BotBus
from tock.context import Context
from tock.intent import IntentName, Intent
from tock.models import TockMessage, BotRequest, BotMessage, BotResponse, ResponseContext
from tock.schemas import TockMessageSchema
from tock.story import Story, ErrorStory, Stories, story
from tock.webhook import TockWebhook
from tock.websocket import TockWebsocket
class TockBot:
def __init__(self):
self.__logger: logging.Logger = logging.getLogger(__name__)
self.__namespace: str = "default"
self.__bus = TockBotBus
self.__stories = Stories()
self.__error_handler: Callable = lambda bus: bus.send("Default error handler")
self.__context = Context()
def namespace(self, namespace: str):
self.__namespace = namespace
return self
def register_bus(self, bus: BotBus):
self.__bus = bus
return self
def error_handler(self, handler: Callable):
self.__error_handler = handler
return self
def add_story(self, intent_name: IntentName, answer: Callable):
story_class: Type[Story] = story(intent_name)(answer)()
self.register_story(story_class)
return self
def register_story(self, story: Type[Story]):
self.__stories.register_story(story)
return self
def start_webhook(self,
host: str,
path: str,
port: int):
TockWebhook(
host=host,
path=path,
port=port,
bot_handler=self.__bot_handler
).start()
def start_websocket(self,
apikey: str = 'apikey_is_undefined',
host: str = 'demo-bot.tock.ai',
port: int = 443,
protocol: str = 'wss'):
loop = asyncio.get_event_loop()
loop.run_until_complete(TockWebsocket(
apikey=apikey,
host=host,
port=port,
protocol=protocol,
bot_handler=self.__bot_handler
).start())
def __bot_handler(self, tock_message: TockMessage) -> str:
messages: List[BotMessage] = []
request: BotRequest = tock_message.bot_request
story_class: Type[Story] = self.__stories.find_story(Intent(request.intent), self.__context.current_story)
self.__context.entities = self.__context.entities + request.entities
self.__context.current_story = story_class
bus = self.__bus(
context=self.__context,
send=lambda bot_message: messages.append(bot_message),
request=request
)
if story_class is not None:
self.__logger.info("story found %s for intent %s", story_class.__name__, request.intent)
story = self.__create(story_class, bus)
else:
self.__logger.info("No story for intent %s", request.intent)
story = ErrorStory(request=request, answer=self.__error_handler)
try:
story.answer(bus)
except Exception:
self.__logger.exception("Unexpected error")
response = TockMessage(
bot_response=BotResponse(
messages=messages,
story_id="story_id",
step=None,
context=ResponseContext(
request_id=tock_message.request_id,
date=datetime.now()
),
entities=[]
),
request_id=tock_message.request_id,
)
tock_response: str = TockMessageSchema().dumps(response)
return tock_response
def __create(self, story_class: Type[Story], bus: BotBus):
story = story_class(request=bus.request)
for entity in bus.context.entities:
entity_type = entity.type.split(":")[1]
if hasattr(story, entity_type):
setattr(
story,
entity_type,
entity.content
)
return story
| null |
tock/bot.py
|
bot.py
|
py
| 4,450 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.Logger",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tock.bus.TockBotBus",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "tock.story.Stories",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tock.context.Context",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tock.bus.BotBus",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "tock.intent.IntentName",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "tock.story.Story",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "tock.story.story",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "tock.story.Story",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "tock.story.story",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "tock.webhook.TockWebhook",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "tock.websocket.TockWebsocket",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tock.models.TockMessage",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "tock.models.BotMessage",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "tock.models.BotRequest",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "tock.story.Story",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "tock.intent.Intent",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "tock.story.story",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "tock.story.story",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "tock.story.ErrorStory",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tock.story.story.answer",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "tock.story.story",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "tock.models.TockMessage",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tock.models.BotResponse",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tock.models.ResponseContext",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "tock.schemas.TockMessageSchema",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "tock.story.Story",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "tock.bus.BotBus",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "tock.story.story",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "tock.story.story",
"line_number": 133,
"usage_type": "argument"
},
{
"api_name": "tock.story.story",
"line_number": 135,
"usage_type": "argument"
},
{
"api_name": "tock.story.story",
"line_number": 139,
"usage_type": "name"
}
] |
258568137
|
# -*- coding: utf-8 -*-
from hashlib import md5
from random import getrandbits
import requests
import six
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from requests.exceptions import ConnectionError, ConnectTimeout
from config import Config
from ..errors import ASServerBusy
def generate_route_log(func, args, kwargs):
params = ', '.join([
'%s=%s' % (k, v) for k, v in kwargs.items()
])
return '%s(%s)' % (func.__name__, params)
class Crypto:
"""Token生成与解析
"""
def __init__(self, *args, **kwargs):
self.expires_in = Config.EXPIRES_IN
self.salt_len = 13
def _rand_str(self, length=0):
assert str(length).isdigit(), 'In randStr: <length> is not digits'
m = md5()
m.update(str(getrandbits(256)).encode('utf-8'))  # md5 requires bytes on Python 3
digest = m.hexdigest()
if int(length) == 0:
return digest
else:
return digest[:int(length)]
def encrypt(self, content):
"""
>> dumped is then split into two parts:
vm30kl324mtre 834n532093...
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
part1 part2
>> len(part1) == self.salt_len
>> the returned token contains three parts:
vm30kl324mtre 9w3kn03n32l9n 834n532093...
^^^^^^^^^^^^^ ^^^^^^^^^^^^^ ^^^^^^^^^^^^^
part1 r part2
>> token == part1 + r + part2
"""
r = self._rand_str(self.salt_len)
key = r + Config.SECRET_KEY
dumped = Serializer(key, expires_in=self.expires_in).dumps(content)
return dumped[:self.salt_len] + r + dumped[self.salt_len:]
def decrypt(self, token):
r = token[self.salt_len:(2 * self.salt_len)]
key = r + Config.SECRET_KEY
dumped = token[:self.salt_len] + token[(2 * self.salt_len):]
try:
return Serializer(key).loads(dumped)
except Exception:
return None
class SyncRemote:
"""将本地数据同步到远程的索引库中"""
def __init__(self):
self.timeout = 5.00
self.host = Config.REMOTE_HOST
self.port = Config.REMOTE_PORT
self.funcMap = {
'GET': self._get,
'POST': self._post
}
def sync(self, path, method, params, json=None, data=None):
reqFunc = self.funcMap[method]
url = self._generate_url(path)
try:
resp = reqFunc(url, params, json, data)
return self._capsulate_response(resp)
except Exception as error:
return self._handle_error(error)
def _get(self, url, params, *args):
return requests.get(url, params=params)
def _post(self, url, params, json=None, data=None):
return requests.post(url, params=params, data=data, json=json)
def _generate_url(self, path):
path = path[1:] if path[0] == '/' else path
url = 'http://{host}:{port}/{path}'.format(
host=self.host, port=self.port, path=path
)
return url
def _capsulate_response(self, resp):
try:
respCode = resp.json().get('errId')
isSync = (respCode == 0)
return isSync
except Exception:
return False
def _handle_error(self, error):
if isinstance(error, ConnectionError):
raise ASServerBusy(u'同步服务器无法链接')
elif isinstance(error, ConnectTimeout):
raise ASServerBusy(u'同步服务器连接超时')
else:
pass
| null |
python/Flask/RestApiTemplate/api/common/helpers.py
|
helpers.py
|
py
| 3,578 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "config.Config.EXPIRES_IN",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "hashlib.md5",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "random.getrandbits",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "config.Config.SECRET_KEY",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "itsdangerous.TimedJSONWebSignatureSerializer",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "config.Config.SECRET_KEY",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "itsdangerous.TimedJSONWebSignatureSerializer",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "config.Config.REMOTE_HOST",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "config.Config.REMOTE_PORT",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.ConnectionError",
"line_number": 112,
"usage_type": "argument"
},
{
"api_name": "errors.ASServerBusy",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.ConnectTimeout",
"line_number": 114,
"usage_type": "argument"
},
{
"api_name": "errors.ASServerBusy",
"line_number": 115,
"usage_type": "call"
}
] |
602704683
|
import cv2
import numpy as np
import os
import foundation
def package_rects_and_weights(rects, weights, scoreThreshold=0.6):
result = list()
for (rect, score) in zip(rects, weights):
if score[0] < scoreThreshold or score[0] > 1.0:
continue
result.append({
'score': score[0],
'box': tuple(rect)
})
result = foundation.non_max_suppression(result)
return result
class HumanScanner(object):
'''
Scanner implementation that returns a list of bounding boxes for any
humans detected in a given image.
'''
def __init__(self, scoreThreshold=0.6, winStride=(8, 8), padding=(8, 8), scale=1.05):
self.scoreThreshold = scoreThreshold
self.winStride = winStride
self.padding = padding
self.scale = scale
self.hog = cv2.HOGDescriptor()
self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
def scan(self, image):
(rects, weights) = self.hog.detectMultiScale(
image,
winStride=self.winStride,
padding=self.padding,
scale=self.scale
)
result = list()
for (rect, score) in zip(rects, weights):
if score[0] < self.scoreThreshold or score[0] > 1.0:
continue
result.append({
'label': 'person',
'score': score[0],
'box': tuple(rect)
})
result = foundation.non_max_suppression(result)
return sorted(result, key=lambda x: x['score'], reverse=True)
class FaceScanner(object):
def __init__(self):
weights_path = os.path.join(
cv2.__path__[0],
'data',
'haarcascade_frontalface_default.xml'
)
self.frontalface = cv2.CascadeClassifier(weights_path)
weights_path = os.path.join(
cv2.__path__[0],
'data',
'haarcascade_profileface.xml'
)
self.profileface = cv2.CascadeClassifier(weights_path)
def scan(
self,
image,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
):
result = list()
for rect in self.frontalface.detectMultiScale(
image,
scaleFactor=scaleFactor,
minNeighbors=minNeighbors,
minSize=minSize,
flags=flags,
):
result.append({
'label': 'face',
'score': 0.99,
'box': tuple(rect)
})
for rect in self.profileface.detectMultiScale(
image,
scaleFactor=scaleFactor,
minNeighbors=minNeighbors,
minSize=minSize,
flags=flags,
):
result.append({
'label': 'face',
'score': 0.99,
'box': tuple(rect)
})
return foundation.non_max_suppression(result)
| null |
leapvision/scanner.py
|
scanner.py
|
py
| 3,018 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "foundation.non_max_suppression",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.HOGDescriptor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.HOGDescriptor_getDefaultPeopleDetector",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "foundation.non_max_suppression",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "cv2.__path__",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "cv2.__path__",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "foundation.non_max_suppression",
"line_number": 103,
"usage_type": "call"
}
] |
381512108
|
from collections import deque
import numpy as np
import pygame
import curses
from events import *
from controllers import *
from history import *
from views import *
from models import *
import palette
def get_images(square_size, grid_size):
SPACE_SIZE = np.array(square_size)
W, H = square_size
X = pygame.Surface(SPACE_SIZE)
X.set_colorkey(C_WHITE)
X.fill(C_WHITE)
pygame.draw.line(X, C_BLACK, SPACE_SIZE//4, SPACE_SIZE*3//4, 4)
pygame.draw.line(X, C_BLACK, (W//4, H*3//4), (W*3//4, H//4), 4)
O = pygame.Surface(SPACE_SIZE)
O.set_colorkey(C_WHITE)
O.fill(C_WHITE)
pygame.draw.circle(O, C_BLACK, SPACE_SIZE//2, SPACE_SIZE[0]//4, 4)
cursor = palette.square(SPACE_SIZE, fgcolor=C_TRANS, bgcolor=C_TRANS, bordercolor=C_RED)
squares = [palette.square(SPACE_SIZE)]
background = palette.grid(grid_size, squares)
return background, {'X': X, 'O': O, 'cursor': cursor}
class TTTGame(Broadcaster):
def __init__(self, board_size=(3, 3), win_length=3):
super().__init__()
self.board_size = board_size
self.win_length = win_length
self.board = Grid(self.board_size)#, container_type=SingletonContainer)
self.containers = { 'board': self.board }
self.history = HistoryTree()
self.reset()
def is_winning_move(self, actor):
chain = self.board.get_longest_chain(actor)
return len(chain) >= self.win_length
def reset(self):
self.board.remove_all()
self.players = deque(['X', 'O'])
self.current_player = self.players[0]
def play(self, location):
actor = Actor(self.current_player)
if not self.board.add(actor, location):
return
self.history.record(location)
# check win condition
if self.is_winning_move(actor):
self.broadcast(Event('VICTORY', self.current_player))
self.reset()
return True
# next player
self.players.rotate(-1)
self.current_player = self.players[0]
return True
def undo(self):
last_move = self.history.unrecord()
if last_move is None:
return
self.board.remove_at(last_move)
# previous player
self.players.rotate()
self.current_player = self.players[0]
return True
def event(self, e):
if str(e) == 'PLAY':
if self.play(e.arg):
self.broadcast(E_UPDATE) # Currently only for debugging
elif str(e) == 'UNDO':
if self.undo():
self.broadcast(E_UPDATE)
elif str(e) == 'RESET':
self.reset()
def run(stdscr, global_events):
# Pygame setup
SQUARE_SIZE = (40, 40)
GRID_SIZE = (3, 3)
background, images = get_images(SQUARE_SIZE, GRID_SIZE)
pygame.init()
pygame.display.set_caption('Tic Tac Toe')
screen = pygame.display.set_mode(np.array(SQUARE_SIZE) * GRID_SIZE)
grid_view = GridView(
square_size=SQUARE_SIZE,
surface=screen,
background=background,
image_map=images,
)
# MVC setup
event_debugger = EventDebugger()
game = TTTGame()
grid = game.containers['board']
grid.subscribe(grid_view)
grid.subscribe(event_debugger)
objects = {
'event_debugger' : event_debugger,
'game' : game,
'spinner_controller' : PygameSpinner(),
'key_listener' : PygameKeyListener(),
'game_controller' : GridController(grid),
#'view_root' : root_view,
'view_grid' : grid_view,
}
#root_view.add_child(grid_view)
for o in objects.values():
global_events.subscribe(o)
objects['spinner_controller'].run()
def main():
global_events = Channel()
#curses.wrapper(run, global_events)
run(None, global_events)
if __name__ == '__main__':
main()
| null |
ttt.py
|
ttt.py
|
py
| 3,919 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.draw.line",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.draw.circle",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "palette.square",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "palette.square",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "palette.grid",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 102,
"usage_type": "call"
}
] |
272917270
|
#!/usr/bin/env python
# @ Params ======================================================================================
load_range = range(30)
dt = 0.01
corr_len = 100.
avg_intvl = 10
img_slice = ':'
two_term = False
# sub_bind = False
sub_bind = True
# @ Main ========================================================================================
import numpy as np
from ase import units
dt_ase = dt *1e3 *units.fs
from ase.io import read
hfacf = []
temp = []
volu = []
for i in load_range:
fname = 'job-{}/kappa-whole/lmp-results.traj_dt{}_c{}_n{}_a{}_t{}_b{}.npy'.format(i, dt, corr_len, img_slice, avg_intvl, two_term, sub_bind)
hfacf.append(np.load(fname))
temp.append(np.load('{}-temp.npy'.format(fname)))
volu.append(np.load('{}-volu.npy'.format(fname)))
hfacf = np.mean(hfacf, axis=0)
mean_temp = np.mean(temp)
mean_volu = np.mean(volu)
#
norm_hfacf = hfacf / hfacf[0]
kappa = np.add.accumulate(hfacf) *dt_ase /units.kB /mean_temp**2 /mean_volu *units._e *units.second *1e10
avg_norm_hfacf = []
for i in range(len(hfacf)):
tmp = []
for j in range(3):
tmp.append(norm_hfacf[i,j,j])
avg_norm_hfacf.append(np.mean(tmp))
avg_kappa = []
for i in range(len(kappa)):
tmp = []
for j in range(3):
tmp.append(kappa[i,j,j])
avg_kappa.append(np.mean(tmp))
from matplotlib import pyplot as plt
start = dt/2.
t = np.arange(len(kappa), dtype=float) *dt +start
fig, ax1 = plt.subplots(3,3)
for i in range(3):
for j in range(3):
ax2 = ax1[i,j].twinx()
ax1[i,j].plot(t, norm_hfacf[:,i,j], c='b')
ax2.plot(t, kappa[:,i,j], c='r')
#
ax1[i,j].set_xlabel('Time (ps)', fontsize='x-large')
ax1[i,j].set_ylabel('Scaled HFACF (Arb. Unit)', fontsize='x-large', color='b')
ax2.set_ylabel(r'$\kappa_{}$$_{}$ (W/mK)'.format(i+1, j+1), fontsize='x-large', color='r')
ax1[i,j].tick_params(axis="x",direction="in", labelsize='x-large')
ax1[i,j].tick_params(axis="y",direction="in", labelsize='x-large', labelcolor='b')
ax2.tick_params(axis="y",direction="in", labelsize='x-large',colors='r', labelcolor='r')
ax1[i,j].grid(alpha=0.5)
ax2.grid(alpha=0.5)
plt.title('IS={}'.format(img_slice), fontsize='x-large')
plt.subplots_adjust(left=0.10, bottom=0.05, right=0.90, top=0.95, wspace=0.80, hspace=0.40)
# Average
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(t, avg_norm_hfacf[:], c='b')
ax2.plot(t, avg_kappa[:], c='r')
#
ax1.set_xlabel('Time (ps)', fontsize='x-large')
ax1.set_ylabel('Scaled HFACF (Arb. Unit)', fontsize='x-large', color='b')
ax2.set_ylabel(r'$\kappa$ (W/mK)', fontsize='x-large', color='r')
ax1.tick_params(axis="x",direction="in", labelsize='x-large')
ax1.tick_params(axis="y",direction="in", labelsize='x-large', labelcolor='b')
ax2.tick_params(axis="y",direction="in", labelsize='x-large',colors='r', labelcolor='r')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
plt.title('M) IS={}, AI={}, dt={}, T={:.2f}'.format(img_slice, avg_intvl, dt, mean_temp))
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.85, top=0.90)
plt.show()
| null |
skel/gst/nequip/md/gk/nve/300k/1fs/1-kooi/12x12x3/plot-avg-whole.py
|
plot-avg-whole.py
|
py
| 3,104 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ase.units.fs",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ase.units",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.add.accumulate",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "ase.units.kB",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "ase.units",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "ase.units._e",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "ase.units.second",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
}
] |
481478590
|
import dapp
import datetime
import io
import textwrap
import time
import traceback
from contextlib import redirect_stdout
class Meta(dapp.DiscordPlugin):
def __init__(self, bot):
super(Meta, self).__init__(bot)
self.last_result = None
def cleanup_code(self, content):
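# Strip Discord code-block fences (```...```) or stray backticks/whitespace from the message body.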
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
return content.strip('` \n')
@dapp.command()
async def ping(self, ctx):
t1 = time.perf_counter()
await ctx.trigger_typing()
t2 = time.perf_counter()
time_delta = round((t2-t1)*1000)
await self.bot.send(":ping_pong: | `{}ms`".format(time_delta), ctx)
@dapp.command()
@dapp.is_owner()
async def exit(self, ctx):
await self.bot.send(":wave: | Goodbye!", ctx, 0xFF0000)
raise SystemExit(await self.bot.logout())
@dapp.command()
async def uptime(self, ctx):
td = datetime.datetime.now() - self.bot.start_time
days, hours, minutes = td.days, td.seconds // 3600, td.seconds // 60 % 60
await self.bot.send(":clock1: | {} day(s) {} hour(s) {} minute(s).".format(days, hours, minutes), ctx)
@dapp.command()
async def source(self, ctx):
await ctx.send('<https://bitbucket.org/jorkermc/gridcoinpricebot/src/master/>')
@dapp.command()
async def help(self, ctx):
e = dapp.Embed(colour=self.bot.default_colour, title='GridcoinPriceBot')
e.set_footer(text="Made by jorkermc#3727", icon_url='https://cdn.discordapp.com/attachments/381963689470984203/452757349657083904/jorkermc.png')
commands = [('help', 'Shows help about a command or the bot.'),
('source', 'Links you to the source code of the bot.'),
('uptime', 'Shows how long the bot has been up.'),
('ping', 'Calculates the ping between the bot and the discord servers.'),
('price', 'Returns the price and a little more data for a cryptocurrency'),
('markets', 'Returns the markets for a cryptocurrrency.'),
('usd', 'Returns the price of a cryptocurrency in USD')]
for command in commands:
e.add_field(name=command[0], value=command[1])
await ctx.send(embed=e)
@dapp.command(name='eval')
@dapp.is_owner()
async def _eval(self, ctx, *, body: str):
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'_': self.last_result
}
env.update(globals())
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
try:
await ctx.message.add_reaction('\u2705')
except:
pass
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
self.last_result = ret
await ctx.send(f'```py\n{value}{ret}\n```')
def setup(bot):
bot.add_cog(Meta(bot))
| null |
plugins/meta.py
|
meta.py
|
py
| 3,834 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dapp.DiscordPlugin",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "time.perf_counter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dapp.command",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dapp.command",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "dapp.is_owner",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "dapp.command",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "dapp.command",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "dapp.Embed",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "dapp.command",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "textwrap.indent",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "contextlib.redirect_stdout",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "dapp.command",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "dapp.is_owner",
"line_number": 63,
"usage_type": "call"
}
] |
12974135
|
import json
import unittest2
from google.appengine.ext import testbed
from datafeeds.parsers.fms_api.fms_api_event_alliances_parser import FMSAPIEventAlliancesParser
class TestFMSAPIEventListParser(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def test_parse_no_alliances(self):
with open('test_data/fms_api/2016_no_alliances.json', 'r') as f:
alliances = FMSAPIEventAlliancesParser().parse(json.loads(f.read()))
self.assertIsNone(alliances)
def test_parse_8alliances(self):
with open('test_data/fms_api/2016_nyny_alliances.json', 'r') as f:
alliances = FMSAPIEventAlliancesParser().parse(json.loads(f.read()))
self.assertTrue(isinstance(alliances, list))
self.assertEqual(len(alliances), 8)
def test_parse_16alliances(self):
with open('test_data/fms_api/2016_micmp_alliances_staging.json', 'r') as f:
alliances = FMSAPIEventAlliancesParser().parse(json.loads(f.read()))
self.assertTrue(isinstance(alliances, list))
self.assertEqual(len(alliances), 16)
def test_parse_4team(self):
with open('test_data/fms_api/2015_curie_alliances.json', 'r') as f:
alliances = FMSAPIEventAlliancesParser().parse(json.loads(f.read()))
self.assertTrue(isinstance(alliances, list))
self.assertEqual(len(alliances), 8)
| null |
tests/test_fms_api_event_alliances_parser.py
|
test_fms_api_event_alliances_parser.py
|
py
| 1,607 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest2.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.testbed.Testbed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.testbed",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "datafeeds.parsers.fms_api.fms_api_event_alliances_parser.FMSAPIEventAlliancesParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datafeeds.parsers.fms_api.fms_api_event_alliances_parser.FMSAPIEventAlliancesParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datafeeds.parsers.fms_api.fms_api_event_alliances_parser.FMSAPIEventAlliancesParser",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datafeeds.parsers.fms_api.fms_api_event_alliances_parser.FMSAPIEventAlliancesParser",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 42,
"usage_type": "call"
}
] |
30784595
|
"""
code related to creating a session
"""
import typing as t
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from utils import get_database_url
engine = create_engine(get_database_url(), echo=False, pool_size=10)
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope() -> t.Any:
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
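# Illustrative usage (model names assumed):
#     with session_scope() as session:
#         session.add(some_model_instance)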
| null |
app/db/session.py
|
session.py
|
py
| 609 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.get_database_url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 17,
"usage_type": "attribute"
}
] |
406872957
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect, csrf_exempt
# from django.template.context_processors import csrf
from hashlib import sha512
import hashlib
# Create your views here.
def index(request):
MERCHANT_KEY = "33y8dMBB"
SALT = "HIRqERoClU"
PAYU_BASE_URL = "https://sandboxsecure.payu.in/_payment"
# PAYU_BASE_URL = "https://secure.payu.in"
action = ""
txnid = "ABC12345671234567891"
hashh = ""
hash_string = ""
posted = {}
posted['txnid'] = txnid
if request.method == 'POST':
action = PAYU_BASE_URL
for i in request.POST:
posted[i] = request.POST[i]
hashSequence = "key|txnid|amount|productinfo|firstname|email|udf1|udf2|udf3|udf4|udf5|udf6|udf7|udf8|udf9|udf10";
hashVarsSeq = hashSequence.split('|')
for i in hashVarsSeq:
try:
hash_string+=str(posted[i])
except Exception:
hash_string+=""
hash_string+="|"
hash_string+=SALT
tempHash = sha512(hash_string.encode('utf-8')).hexdigest().lower()
hashh = tempHash
mycontext = {"head":"PayU Money","MERCHANT_KEY":MERCHANT_KEY, "posted":posted, "hashh":hashh, "hash_string":hash_string, "txnid":txnid, "action":action}
return render(request,'payu/paymentform.html', context=mycontext)
@csrf_protect
@csrf_exempt
def success(request):
# c = {}
# c.update(csrf(request))
status = request.POST['status']
firstname = request.POST['firstname']
amount = request.POST['amount']
txnid = request.POST['txnid']
posted_hash = request.POST['hash']
key = request.POST['key']
productinfo = request.POST['productinfo']
email = request.POST['email']
SALT = "HIRqERoClU"
try:
additionalCharges = request.POST['additionalCharges']
retHashSeq = additionalCharges+'|'+SALT+'|'+status+'|||||||||||'+email+'|'+firstname+'|'+productinfo+'|'+amount+'|'+txnid+'|'+key
except Exception:
retHashSeq = SALT+'|'+status+'|||||||||||'+email+'|'+firstname+'|'+productinfo+'|'+amount+'|'+txnid+'|'+key
hashh = hashlib.sha512(retHashSeq.encode('utf-8')).hexdigest().lower()
if (hashh != posted_hash):
paymentStatus = "Invalid Transaction. Please try again"
else:
paymentStatus = "Thank You. Your order status is %s. \n Your Transaction ID for this transaction is %s.\n We have received a payment of Rs. %s\n"%(status,txnid,amount)
t = {"finalstatus": str(paymentStatus)}
return render(request, 'payu/success.html', context=t)
@csrf_protect
@csrf_exempt
def fail(request):
# c = {}
# c.update(csrf(request))
status = request.POST['status']
firstname = request.POST['firstname']
amount = request.POST['amount']
txnid = request.POST['txnid']
posted_hash = request.POST['hash']
key = request.POST['key']
productinfo = request.POST['productinfo']
email = request.POST['email']
SALT = "HIRqERoClU"
try:
additionalCharges = request.POST['additionalCharges']
retHashSeq = additionalCharges+'|'+SALT+'|'+status+'|||||||||||'+email+'|'+firstname+'|'+productinfo+'|'+amount+'|'+txnid+'|'+key
except Exception:
retHashSeq = SALT+'|'+status+'|||||||||||'+email+'|'+firstname+'|'+productinfo+'|'+amount+'|'+txnid+'|'+key
hashh = hashlib.sha512(retHashSeq.encode('utf-8')).hexdigest().lower()
if (hashh != posted_hash):
paymentStatus = "Invalid Transaction. Please try again"
else:
paymentStatus = "Thank You. Your order status is %s. \n Your Transaction ID for this transaction is %s.\n We have received a payment of Rs. %s\n"%(status,txnid,amount)
t = {"finalstatus": str(paymentStatus)}
return render(request, 'payu/fail.html', context=t)
| null |
views.py
|
views.py
|
py
| 3,960 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "hashlib.sha512",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "hashlib.sha512",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "hashlib.sha512",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 70,
"usage_type": "name"
}
] |
344787531
|
import json
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from .models import ProductionTask, MCPriority
import atlas.deftcore.api.client as deft
_deft_client = deft.Client(settings.DEFT_AUTH_USER, settings.DEFT_AUTH_KEY)
# Mapping between task actions and DEFT task actions
_deft_actions = {
'kill': 'abort_task',
'finish': 'finish_task',
'change_priority': 'change_task_priority',
'reassign_to_site': 'reassign_task_to_site',
'reassign_to_cloud': 'reassign_task_to_cloud',
'retry': 'retry_task',
'change_ram_count': 'change_task_ram_count',
'change_wall_time': 'change_task_wall_time',
}
supported_actions = list(_deft_actions.keys())
supported_actions.extend(['obsolete', 'increase_priority', 'decrease_priority'])
def do_action(owner, task_id, action, *args):
result = dict(owner=owner, task=task_id, action=action, args=args,
status=None, accepted=False, registered=False,
exception=None, exception_source=None)
if not action in supported_actions:
result['exception'] = "Action '%s' is not supported" % action
return result
if action in _deft_actions:
result.update(_do_deft_action(owner, task_id, action, *args))
elif action == 'increase_priority':
result.update(increase_task_priority(owner, task_id, *args))
elif action == 'decrease_priority':
result.update(decrease_task_priority(owner, task_id, *args))
elif action == 'obsolete':
result.update(obsolete_task(owner, task_id))
return result
def _do_deft_action(owner, task_id, action, *args):
"""
Perform task action using DEFT API
:param owner: username form which task action will be performed
:param task_id: task ID
:param action: action name
:param args: additional arguments for the action (if needed)
:return: dictionary with action execution details
"""
result = dict(owner=owner, task=task_id, action=action, args=args,
status=None, accepted=False, registered=False,
exception=None, exception_source=None)
if not action in _deft_actions:
result['exception'] = "Action '%s' is not supported" % action
return result
try:
func = getattr(_deft_client, _deft_actions[action])
except AttributeError as e:
result.update(exception=str(e))
return result
try:
request_id = func(owner, task_id, *args)
except Exception as e:
result.update(exception=str(e),
exception_source=_deft_client.__class__.__name__)
return result
result['accepted'] = True
try:
status = _deft_client.get_status(request_id)
except Exception as e:
result.update(exception=str(e),
exception_source=_deft_client.__class__.__name__)
return result
result.update(registered=True, status=status)
return result
def obsolete_task(owner, task_id):
"""
Mark task as 'obsolete'
:param owner: username form which task action will be performed
:param task_id: task ID
:return: dict with action status
"""
result = dict(owner=owner, task_id=task_id,
accepted=True, registered=False, exception=None)
# TODO: add logging
# TODO: add logging with PandaLog (using DEFT API)
try:
task = ProductionTask.objects.get(id=task_id)
except ObjectDoesNotExist:
result['exception'] = "Task '%s' does not exist" % task_id
return result
except Exception as error:
result['exception'] = str(error)
return result
if task.status not in ['done', 'finished']:
result['exception'] = "Task '%s' is in the state '%s', not 'done' or 'finished'" % (task_id, task.status)
return result
#TODO: log action
ProductionTask.objects.filter(id=task_id).update(status='obsolete', timestamp=timezone.now())
result['registered'] = True
return result
def change_task_priority(owner, task_id, priority):
"""
Set task JEDI priority.
:param task_id: task ID
:param priority: JEDI task priority
:return: dict with action status
"""
# TODO: add status checking and logging
return _do_deft_action(owner, task_id, 'change_priority', priority)
def get_task_priority_levels(task_id):
"""
Get task priority levels (if any) for the task
:param task_id: task ID
:return: dict containing available levels, current level and task priority
"""
def get_priority_levels():
""" Get JEDI priority levels from the step template
:return: dict of JEDI priority of the step { name: {level: priority, ...}, ...}
"""
levels_ = {}
for prio in MCPriority.objects.all():
try:
named_priorities = json.loads(prio.priority_dict)
except:
continue
for name, priority in named_priorities.items():
if not levels_.get(name):
levels_[name] = {}
levels_[name][int(prio.priority_key)] = priority
return levels_
result = dict(id=task_id, current_level=None, levels={}, current_priority=None)
try:
task = ProductionTask.objects.get(id=task_id)
except ObjectDoesNotExist:
result.update(reason="Task not found")
return result
step = task.step
slice_priority = step.slice.priority
result["current_priority"] = int(task.current_priority or task.priority)
if slice_priority < 100: # having a priority level here
step_name = step.step_template.step
levels = get_priority_levels()
result["levels"] = levels.get(step_name, {})
result["successful"] = True
return result
def shift_task_priority(owner, task_id, level_shift, priority_shift=None):
"""
Shifting task priority up or down
:param owner: username form which task action will be performed
:param task_id: task ID
:param level_shift: if > 0, increasing the priority, otherwise decreasing.
Has precedence over priority_shift.
:param priority_shift: value of priority shift to apply
:return:
"""
levels_info = get_task_priority_levels(task_id)
levels = levels_info.get("levels")
current_prio = levels_info.get("current_priority")
result = dict()
if levels:
levels = levels.values()
if not levels and (priority_shift is not None):
return change_task_priority(owner, task_id, current_prio+priority_shift)
if level_shift > 0:
next_priorities = sorted([x for x in levels if x > current_prio])
else:
next_priorities = sorted([x for x in levels if x < current_prio])
if not next_priorities: # limit value is reached
return result
new_priority = next_priorities[0]
return change_task_priority(owner, task_id, new_priority)
def increase_task_priority(owner, task_id, delta=None):
"""
Increase task priority for one level or specified value
:param owner: username form which task action will be performed
:param task_id: task ID
:param delta: value to change priority on
:return:
"""
if isinstance(delta, int):
return shift_task_priority(owner=owner, task_id=task_id, level_shift=-1, priority_shift=delta)
else:
return shift_task_priority(owner=owner, task_id=task_id, level_shift=-1)
def decrease_task_priority(owner, task_id, delta=None):
"""
Decrease task priority for one level or specified value
:param owner: username form which task action will be performed
:param task_id: task ID
:param delta: value to change priority on
:return:
"""
if isinstance(delta, int):
return shift_task_priority(owner=owner, task_id=task_id, level_shift=1, priority_shift=-delta)
else:
return shift_task_priority(owner=owner, task_id=task_id, level_shift=1)
| null |
atlas/prodtask/task_actions.py
|
task_actions.py
|
py
| 8,001 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "atlas.deftcore.api.client.Client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "atlas.deftcore.api.client",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DEFT_AUTH_USER",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DEFT_AUTH_KEY",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.ProductionTask.objects.get",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "models.ProductionTask.objects",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "models.ProductionTask",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "models.ProductionTask.objects.filter",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "models.ProductionTask.objects",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "models.ProductionTask",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "models.MCPriority.objects.all",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "models.MCPriority.objects",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "models.MCPriority",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "models.ProductionTask.objects.get",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "models.ProductionTask.objects",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "models.ProductionTask",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ObjectDoesNotExist",
"line_number": 164,
"usage_type": "name"
}
] |
134005912
|
"""The main part of the entigen tool."""
import argparse
import re
from typing import List, Dict, Optional
from .model import Model
from .readers.csv import CSVReader
from .writers.python import PythonWriter
from .writers.info import InfoWriter
from .extensible import Extensible
# Pattern for parsing argument-defined variables for writers
VARIABLE_PATTERN = r"(\w+)(=.*)?"
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('model',
help='Model source')
parser.add_argument('entities', nargs='*',
help='Entities to be included')
parser.add_argument('-b', '--block', dest='block_type',
help="Block type the writer writes")
parser.add_argument('-f', '--from', dest='reader',
default="csv",
help="Metamodel input format")
parser.add_argument('-t', '--to', dest='writer',
default="python",
help="Text output format")
parser.add_argument('-V', '--variable', dest='variables',
action="append",
help="Text output format")
def parse_variables(vars: Optional[List[str]]) -> Dict[str,str]:
"""Parse command line defined variables in the form ``name=value``. Returns
a dictionary where keys are variable names and values are variable values
parsed from the `vars`. If a variable name without value is specified (no
``=`` character) then value `True` is assumed to mark the variable as
"present" or as "flag".
"""
variables: Dict[str,str] = {}
if vars is None:
return variables
for var in vars:
match = re.match(VARIABLE_PATTERN, var)
if not match:
continue
name = match.groups()[0]
value = match.groups()[1]
if value is None:
variables[name] = True
else:
# Strip the leading `=`
variables[name] = value[1:]
return variables
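# Illustrative: parse_variables(["verbose", "name=value"]) -> {"verbose": True, "name": "value"}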
def main() -> None:
args = parser.parse_args()
variables = parse_variables(args.variables)
model = Model()
reader = Extensible.readers[args.reader](model=model)
reader.read_model(args.model)
writer_factory = Extensible.writers[args.writer]
writer = writer_factory(model=model, variables=variables)
# If no block type is specified then default is used
block_type = args.block_type or writer.block_types[0]
block = writer.create_block(block_type, args.entities)
print(block)
| null |
entigen/main.py
|
main.py
|
py
| 2,529 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "model.Model",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "extensible.Extensible.readers",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "extensible.Extensible",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "extensible.Extensible.writers",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "extensible.Extensible",
"line_number": 83,
"usage_type": "name"
}
] |
440663271
|
from datetime import datetime, timezone
import requests, json, time
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
url = 'https://crst.izmiran.ru/crdt'
def obtain_data(station: str, dt_from: datetime, dt_to: datetime, channel: str='V'):
tfr, tto = [int(d.replace(tzinfo=timezone.utc).timestamp()) for d in (dt_from, dt_to)]
para = f'from={tfr}&to={tto}&station={station}&channel={channel}&coefs=saved'
uri = f'{url}/api/muones?{para}'
while True:
res = requests.get(uri, verify=False)
if res.status_code != 200:
print(f'request failed: {res.status_code}')
return None, None
body = json.loads(res.text)
status = body['status']
assert status != 'failed'
if status != 'ok':
print(f'{dt_from}: {status} {body.get("info") or ""}')
time.sleep(1)
else:
return body.get('data'), body.get('fields') # body['info'] should contain coefficients
if __name__ == '__main__':
dt_from = datetime(2021, 1, 5)
dt_to = datetime(2022, 4, 5)
data, fields = obtain_data('Apatity', dt_from, dt_to)
# convert timestamp to datetime
import numpy as np
data = np.array(data, dtype='object')
data[:,0] = [datetime.utcfromtimestamp(d) for d in data[:,0]]
# "pretty" print
print(" ".join([' date']+fields))
for l in data[:7]:
print("\t".join([str(i) for i in l]))
print('...')
for l in data[-7:]:
print("\t".join([str(i) for i in l]))
| null |
scripts/data_example.py
|
data_example.py
|
py
| 1,602 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.packages.urllib3.exceptions.InsecureRequestWarning",
"line_number": 4,
"usage_type": "argument"
},
{
"api_name": "requests.packages",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "name"
}
] |
211384546
|
import csv
from sklearn.preprocessing import StandardScaler
from prepare_input import prepare_input
from prepare_score_rank_data import prepare_score_ranking
from train_ann import ann
from prepare_data import prepare_data
from pandas import DataFrame
if __name__ == '__main__':
X_columns = ['female ratio', 'teaching score', 'international score', 'research score', 'citations', 'income',
'student/staff ratio', 'international students', 'number of students']
X = [30, 95, 95, 75, 70, 70, 0.8, 30, 20000]
score_ranking_filename = 'score_ranking_2015.csv'
data_filename = 'times_data_2011_15.csv'
score_ranking_names = ['ranking', 'score']
data_names = ['female ratio', 'teaching score', 'international score', 'research score', 'citations', 'income',
'total score', 'student/staff ratio', 'international students', 'number of students']
#import score_ranking
raw_data = open(score_ranking_filename, 'rt')
reader = csv.reader(raw_data, delimiter=';', quoting=csv.QUOTE_NONE)
score_ranking = list(reader)
#import data
raw_data_2 = open(data_filename, 'rt')
reader_2 = csv.reader(raw_data_2, delimiter=';', quoting=csv.QUOTE_NONE)
data = list(reader_2)
#preparation
X_data, y_data, scaler = prepare_data(data)
score_ranking = prepare_score_ranking(score_ranking)
#Standardize
standardScaler = StandardScaler()
standardScaler.fit(X_data)
x = standardScaler.transform(X_data)
#ann
model = ann(x, y_data)
#preparation
X_prepared = prepare_input(X, scaler)
#Standardize
x = standardScaler.transform([X_prepared])
y = model.predict(DataFrame(x))[0]
haveRank = False
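# Walk the score/ranking table (assumed sorted by descending score): an exact match takes
# that rank, a higher score takes the rank just above the first entry it beats.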
for k in score_ranking:
if y == k[1]:
rank = k[0]
haveRank = True
break
elif y > k[1]:
rank = k[0] - 1
haveRank = True
break
if not haveRank:
rank = "last"
print("projected rank: " + str(rank))
print("score of the university: " + str(y))
| null |
main_ann.py
|
main_ann.py
|
py
| 2,071 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "csv.reader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_NONE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_NONE",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "prepare_data.prepare_data",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "prepare_score_rank_data.prepare_score_ranking",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "train_ann.ann",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "prepare_input.prepare_input",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "call"
}
] |
69941922
|
'''
Cracking The Coding Interview
Chapter 8: Recursion/DP/Memoisation
Question 8.1: "Triple Step: A child is running up a staircase
with n steps and can hop either 1 step, 2 steps, or 3 steps
at a time. Implement a method to count how many possible ways
the child can run up the stairs."
'''
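# Counting ordered step sequences gives the recurrence
# ways(n) = ways(n-1) + ways(n-2) + ways(n-3) with ways(0) = ways(1) = 1,
# e.g. ways(2) = 2 and ways(3) = 4.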
'''
Method1: Recursion
'''
def countSteps(n):
if n == 0 or n == 1:
return 1
elif n < 0:
return 0
else:
return countSteps(n-1) + countSteps(n-2) + countSteps(n-3)
'''
Method2: Memoization
'''
from collections import defaultdict
def countSteps_Memo(n):
memo = defaultdict(lambda:0)
memo[0] = 1
memo[1] = 1
def hop(n, memo):
if n == 0 or n == 1:
return 1
elif n < 0:
return 0
else:
for i in range(2, n+1):
if memo[i] == 0:
memo[i] = memo[i-1] + memo[i-2] + memo[i-3]
return memo[n]
return hop(n, memo)
def test_countSteps():
assert countSteps(2) == 2
assert countSteps_Memo(2) == 2
test_countSteps()
| null |
Interview/Google/tripleSet.py
|
tripleSet.py
|
py
| 1,060 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.defaultdict",
"line_number": 26,
"usage_type": "call"
}
] |
282219627
|
"""
* Model : KNeighbors, Gaussian Naive Bayes,
* Neural Network model sangat sensitive pada data yg valuenya tidak di scale.
* Cross Validation score, menggunakan rata-rata
* Cross Validation, menggunakan Startified KFold, variasi dari KFold
"""
from Models.Classification.ClassifierModel import KNeighborsModel
from Models.Classification.ClassifierModel import NBGaussModel
from Models.Classification.ClassifierModel import DTreeModel
from Models.Classification.ClassifierModel import MultiLayerPerceptronModel
from DataManager import Manager
from sklearn.model_selection import cross_val_score, KFold
import numpy as np
import pandas as pd
import os
abspath = os.path.abspath(__file__)
this_script_path = os.path.dirname(abspath)
datasets_path = os.path.join(this_script_path, "Datasets")
os.chdir(datasets_path)
training_data_filename = "iris-train.csv"
test_data_filename = "iris-test.csv"
training_data_filename = "heartdisease-train.csv"
test_data_filename = "heartdisease-test.csv"
# training_data_filename = input("input training dataset file name: ")
# test_data_filename = input("input test dataset file name : ")
training_dataset = pd.read_csv(training_data_filename)
test_dataset = pd.read_csv(test_data_filename)
print("Load Dataset... ({}/{})".format(training_data_filename, test_data_filename))
manager = Manager(training_dataset=training_dataset, test_dataset=test_dataset)
manager.scale_data()
## INITIAL KNN MODEL
KNN_model = KNeighborsModel(n=5)
KNN_model.train(X=manager.X_train, Y=manager.Y_train)
## INITIAL GNB MODEL
GNB_model = NBGaussModel()
GNB_model.train(X=manager.X_train, Y=manager.Y_train)
## INITIAL DTREE MODEL
DT_model = DTreeModel()
DT_model.train(X=manager.X_train, Y=manager.Y_train)
## INITIAL MULTI-LAYER PERCEPTRON MODEL
MLP_model = MultiLayerPerceptronModel()
MLP_model.train(X=manager.X_train, Y=manager.Y_train)
scores_KNN = []
scores_GNB = []
scores_DT = []
scores_MLP = []
## PREDICT/TRAINING
GNB_model.predict_y(test_set_x=manager.X_train)
KNN_model.predict_y(test_set_x=manager.X_train)
DT_model. predict_y(test_set_x=manager.X_train)
MLP_model.predict_y(test_set_x=manager.X_train)
print("Training Accuracy with train_test_split approach: ")
print("KNN : {}%".format(KNN_model.get_accuracy(Y_test=manager.Y_train) * 100))
print("GNB : {}%".format(GNB_model.get_accuracy(Y_test=manager.Y_train) * 100))
print("DT : {}%".format(DT_model .get_accuracy(Y_test=manager.Y_train) * 100))
print("MLP : {}%".format(MLP_model.get_accuracy(Y_test=manager.Y_train) * 100))
scores_KNN.append(KNN_model.current_accuracy * 100)
scores_GNB.append(GNB_model.current_accuracy * 100)
scores_DT .append(DT_model .current_accuracy * 100)
scores_MLP.append(MLP_model.current_accuracy * 100)
## PREDICT/TESTING
GNB_model.predict_y(test_set_x=manager.X_test)
KNN_model.predict_y(test_set_x=manager.X_test)
DT_model. predict_y(test_set_x=manager.X_test)
MLP_model.predict_y(test_set_x=manager.X_test)
print("Test Accuracy: ")
print("KNN : {}%".format(KNN_model.get_accuracy(Y_test=manager.Y_test) * 100))
print("GNB : {}%".format(GNB_model.get_accuracy(Y_test=manager.Y_test) * 100))
print("DT : {}%".format(DT_model .get_accuracy(Y_test=manager.Y_test) * 100))
print("MLP : {}%".format(MLP_model.get_accuracy(Y_test=manager.Y_test) * 100))
scores_KNN.append(KNN_model.get_accuracy(Y_test=manager.Y_test) * 100)
scores_GNB.append(GNB_model.get_accuracy(Y_test=manager.Y_test) * 100)
scores_DT .append(DT_model .get_accuracy(Y_test=manager.Y_test) * 100)
scores_MLP.append(MLP_model.get_accuracy(Y_test=manager.Y_test) * 100)
#################################
### TRAIN TEST SPLIT APPROACH ###
#################################
manager.do_train_test_split()
## GNB
GNB_model.train(X=manager.X_train, Y=manager.Y_train)
GNB_model.predict_y(test_set_x=manager.X_test)
## KNN
knn_scores = []
k_n_range = range(1, 26)
for k in k_n_range:
KNN_model.set_k(n=k)
KNN_model.train(X=manager.X_train, Y=manager.Y_train)
KNN_model.predict_y(test_set_x=manager.X_test)
knn_scores.append(KNN_model.get_accuracy(Y_test=manager.Y_test))
## DT
DT_model.train(X=manager.X_train, Y=manager.Y_train)
DT_model.predict_y(test_set_x=manager.X_test)
## MLP
MLP_model.train(X=manager.X_train, Y=manager.Y_train)
MLP_model.predict_y(test_set_x=manager.X_test)
scores_KNN.append(max(knn_scores) * 100)
scores_GNB.append(GNB_model.get_accuracy(Y_test=manager.Y_test) * 100)
scores_DT .append(DT_model .get_accuracy(Y_test=manager.Y_test) * 100)
scores_MLP.append(MLP_model.get_accuracy(Y_test=manager.Y_test) * 100)
print("Training Accuracy with train_test_split approach: ")
print("KNN : {}%".format(max(knn_scores) * 100))
print("GNB : {}%".format(GNB_model.current_accuracy * 100))
print("DT : {}%".format(DT_model .current_accuracy * 100))
print("MLP : {}%".format(MLP_model.current_accuracy * 100))
#################################
### CROSS VALIDATION APPROACH ###
#################################
manager = Manager(training_dataset=training_dataset, test_dataset=test_dataset)
manager.scale_data()
print("Training Accuracy with cross validation approach: ")
## cross val StratifiedKFold
## KFold
# kf = KFold(n_splits=10)
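# With an integer cv and a classifier, cross_val_score uses StratifiedKFold by default,
# which gives the stratified behaviour noted above.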
cvs_knn = cross_val_score(KNN_model.model, manager.X_train, manager.Y_train.values.ravel(), cv=10).mean() * 100
cvs_gnb = cross_val_score(GNB_model.model, manager.X_train, manager.Y_train.values.ravel(), cv=10).mean() * 100
cvs_dt = cross_val_score(DT_model .model, manager.X_train, manager.Y_train.values.ravel(), cv=10).mean() * 100
cvs_mlp = cross_val_score(MLP_model.model, manager.X_train, manager.Y_train.values.ravel(), cv=10).mean() * 100
print(cvs_knn)
print(cvs_gnb)
print(cvs_dt)
print(cvs_mlp)
scores_KNN.append(cvs_knn)
scores_GNB.append(cvs_gnb)
scores_DT .append(cvs_dt)
scores_MLP.append(cvs_mlp)
from matplotlib import pyplot as plt
plt.plot(k_n_range, knn_scores)
plt.xlabel("K")
plt.ylabel("Score")
plt.show()
n_groups = 4
ax = plt.subplot()
index = np.arange(n_groups)
bar_width = 0.15
opacity = 0.5
print(tuple(scores_KNN))
rects1 = ax.bar(
index, tuple(scores_KNN), bar_width,
alpha=opacity,
color='b',
label='KNN'
)
rects2 = ax.bar(
index + bar_width, tuple(scores_GNB), bar_width,
alpha=opacity,
color='g',
label='GNB'
)
rects3 = ax.bar(
index + bar_width * 2, tuple(scores_DT), bar_width,
alpha=opacity,
color='y',
label='DT'
)
rects4 = ax.bar(
index + bar_width * 3, tuple(scores_MLP), bar_width,
alpha=opacity,
color='r',
label='MLP'
)
for rect in rects1:
ax.text(
rect.get_x() + rect.get_width()/5,
1.01*rect.get_height(),
str(int(rect.get_height())) + "%",
color='b',
fontweight='bold'
)
for rect in rects2:
ax.text(
rect.get_x() + rect.get_width()/5,
1.01*rect.get_height(),
str(int(rect.get_height())) + "%",
color='g',
fontweight='bold'
)
for rect in rects3:
ax.text(
rect.get_x() + rect.get_width()/5,
1.01*rect.get_height(),
str(int(rect.get_height())) + "%",
color='y',
fontweight='bold'
)
for rect in rects4:
ax.text(
rect.get_x() + rect.get_width()/5, # label horizontal position
1.01*rect.get_height(), # label vertical position
str(int(rect.get_height())) + "%",
color='r',
fontweight='bold'
)
plt.ylim([0, 100])
plt.xlabel('Context')
plt.ylabel('Scores')
plt.title("Scores by model {}/{} dataset".format(training_data_filename, test_data_filename))
plt.xticks(index + bar_width * 1.5, ('Training', 'Testing', 'Train Test Split', 'Cross Validation'))
plt.legend()
plt.tight_layout()
plt.show()
| null |
App.py
|
App.py
|
py
| 7,675 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "DataManager.Manager",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "Models.Classification.ClassifierModel.KNeighborsModel",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "Models.Classification.ClassifierModel.NBGaussModel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "Models.Classification.ClassifierModel.DTreeModel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "Models.Classification.ClassifierModel.MultiLayerPerceptronModel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "DataManager.Manager",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 237,
"usage_type": "name"
}
] |
146338310
|
import pytest
from datetime import date, datetime
from analyzers.coverage_stop_areas import AnalyzeCoverageStopAreas
import os
pytestmark = pytest.mark.usefixtures("spark")
path = os.getcwd() + "/tests/fixtures/coverage_stop_areas"
date_2017_02_15 = datetime.utcfromtimestamp(1484473662).date()
date_2017_02_18 = datetime.utcfromtimestamp(1484758062).date()
def test_coverage_stop_area_no_journeys(spark):
start_date = date(2017, 1, 22)
end_date = date(2017, 1, 22)
analyzer = AnalyzeCoverageStopAreas(storage_path=path,
start_date=start_date,
end_date=end_date,
spark_session=spark,
database=None,
current_datetime=datetime(2017, 2, 15, 15, 10))
results = analyzer.get_data()
assert len(results) == 0
def test_coverage_stop_area_no_valid_found(spark):
start_date = date(2017, 1, 20)
end_date = date(2017, 1, 20)
analyzer = AnalyzeCoverageStopAreas(storage_path=path,
start_date=start_date,
end_date=end_date,
spark_session=spark,
database=None,
current_datetime=datetime(2017, 2, 15, 15, 10))
results = analyzer.get_data()
assert len(results) == 0
assert analyzer.get_log_analyzer_stats(datetime(2017, 2, 15, 15, 12)) == \
"[spark-stat-analyzer] [OK] [2017-02-15 15:12:00] [2017-02-15 15:10:00] [CoverageStopAreas] [120]"
@pytest.mark.parametrize("day,expected_results", [
(15,
[(u'auv', u'sa_2', u'stop 2', '', '', '', '', 1, date_2017_02_15, 2),
(u'auv', u'sa_4', u'stop 4', '', '', '', '', 1, date_2017_02_15, 2),
(u'auv', u'sa_1', u'stop 1', '', '', '', '', 1, date_2017_02_15, 1),
(u'auv', u'sa_3', u'stop 3', '', '', '', '', 1, date_2017_02_15, 2)]),
(18,
[(u'auv', u'sa_4', u'stop 4', '', '', '', '', 1, date_2017_02_18, 2),
(u'npdc', u'sa_4', u'stop 4', u'admin:xxx4', '', '', '', 0, date_2017_02_18, 1),
(u'auv', u'sa_3', u'stop 3', '', '', '', '', 1, date_2017_02_18, 2),
(u'auv', u'sa_2', u'stop 2', '', '', '', '', 1, date_2017_02_18, 2),
(u'auv', u'sa_1', u'stop 1', '', '', '', '', 1, date_2017_02_18, 1),
(u'npdc', u'sa_3', u'stop 3', '', u'on the styx', '', '', 0, date_2017_02_18, 1),
(u'npdc', u'sa_1', u'stop 1', '', '', u'123456', u'12', 0, date_2017_02_18, 1),
(u'npdc', u'sa_3', u'stop 3', '', '', '', '', 0, date_2017_02_18, 1),
(u'npdc', u'sa_2', u'stop 2', '', '', u'987654', u'98', 0, date_2017_02_18, 2)]),
])
def test_coverage_stop_area_count(spark, day, expected_results):
start_date = date(2017, 1, day)
end_date = date(2017, 1, day)
analyzer = AnalyzeCoverageStopAreas(storage_path=path,
start_date=start_date,
end_date=end_date,
spark_session=spark,
database=None,
current_datetime=datetime(2017, 2, 15, 15, 10))
results = analyzer.get_data()
assert len(results) == len(expected_results)
for result in results:
assert result in expected_results
| null |
tests/coverage_stop_areas_test.py
|
coverage_stop_areas_test.py
|
py
| 3,447 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.mark.usefixtures",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "analyzers.coverage_stop_areas.AnalyzeCoverageStopAreas",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "analyzers.coverage_stop_areas.AnalyzeCoverageStopAreas",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "analyzers.coverage_stop_areas.AnalyzeCoverageStopAreas",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 42,
"usage_type": "attribute"
}
] |
534135207
|
import base64
import json
import os
import requests
PROJECT_ID = os.environ['PROJECT_ID']
PUBSUB_VERIFICATION_TOKEN = os.environ['PUBSUB_VERIFICATION_TOKEN']
def tfrecord_caller(data, _):
"""Background Cloud Function to be triggered by Cloud Storage.
This generic function logs relevant data when a file is changed.
This function will be called when a file is added to the bucket,
it will call the tfrecord_builder API with the file added.
Future work: could try to instead build the tfrecord directly here,
but not sure a 2gb RAM limited cloud function can handle tensorflow stuff
Args:
data (dict): The Cloud Functions event payload.
_ (google.cloud.functions.Context): Metadata of triggering event.
Returns:
None; the output is written to Stackdriver Logging
"""
if not (data['contentType'].startswith('image') or data['contentType'].startswith('video')):
print("Unhandled file type")
return
# Replace with project id and pub_pub_token (the format args)
url = 'http://tfrecord-builder-dot-{}.appspot.com/pubsub/push?token={}'.format(PROJECT_ID, PUBSUB_VERIFICATION_TOKEN)
response = requests.post(
url,
data=json.dumps({
"message": {
"data": base64.b64encode(
data['name'].encode('utf-8')
).decode('utf-8')
}
})
)
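# The POST body mimics a Pub/Sub push envelope: the GCS object name is base64-encoded
# under message.data, which the tfrecord-builder endpoint is assumed to decode.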
print('Response', response.text)
return
| null |
cloud_functions/tfrecord_caller/main.py
|
main.py
|
py
| 1,488 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 39,
"usage_type": "call"
}
] |
124852407
|
from __future__ import print_function
__author__ = 'awave'
import cv2, sys
from helpers import download
from matplotlib import pyplot
# load the image and convert it to grayscale
def open_image(img_name):
key = cv2.waitKey(0)
esc = 27
if key != esc:
image = cv2.imread(img_name)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2.imshow("Original", image)
cv2.waitKey(0)
else:
cv2.destroyAllWindows()
def open_images(img_names):
for img_name in img_names:
image = cv2.imread(img_name)
cv2.imshow('img', image)
if __name__ == '__main__':
# RED = [0, 0, 2553]
# esc = 27
matplot_enabled = True
on = raw_input('Enable matplotlib? (y/n): ')
if on == 'y':
matplot_enabled = True
elif on == 'n':
matplot_enabled = False
image = cv2.imread('showCaptcha.jpg')
if matplot_enabled:
pyplot.imshow(image, cmap='gray', interpolation='bicubic')
pyplot.xticks([])
pyplot.yticks([])
pyplot.show()
else:
# Resizable window
# cv2.namedWindow('img', cv2.WINDOW_NORMAL)
cv2.imshow('img', image)
key = cv2.waitKey(0) & 0xFF
print(image.shape)
print(image.dtype)
print(image.size)
if key == 27:
cv2.destroyAllWindows()
sys.exit('Open-cv process closed')
elif key == ord('s'):
cv2.imwrite('capt.png', image)
cv2.destroyAllWindows()
sys.exit('Image saved')
cv2.waitKey(5000)
| null |
img_test.py
|
img_test.py
|
py
| 1,570 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.waitKey",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "cv2.imshow",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 63,
"usage_type": "call"
}
] |
80054180
|
from flask import Flask
from flask import request
from flask import jsonify
from flask import send_from_directory
from text import sentence_similarity as sentence_similarity_api
from speech.analysis import main as analysis_api
from speech.emotion import emotion_api
from speech.utils import misc
import jsonpickle
import redis
from speech.transcription.transfer_learning import chunk_data_api as chunk_api
import tensorflow as tf
import tensorflow_hub as hub
import json
import uuid
from text import bert_mrpc as bert_api
import shutil
import os
app = Flask(__name__)
loaded_model = None
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
embed = g = session = messages = output = None
def perform_graph_setup():
global embed,g,session,messages,output
print("Loading tensorflow graph for the first request")
module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3"
os.environ['TFHUB_CACHE_DIR']='/home/absin/tfhub'
#if os.path.exists(os.environ['TFHUB_CACHE_DIR']) and os.path.isdir(os.environ['TFHUB_CACHE_DIR']):
# shutil.rmtree(os.environ['TFHUB_CACHE_DIR'])
#os.makedirs(os.environ['TFHUB_CACHE_DIR'])
embed = hub.Module(module_url)
print("While first request loading hub module downloaded..")
g = tf.get_default_graph()
session = tf.Session(graph=g)
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
messages = tf.placeholder(dtype=tf.string, shape=[None])
output = embed(messages)
print('Successfully initialized sentence similarity variables')
@app.route("/sentence_similarity_many", methods=['GET', 'POST'])
def sentence_similarity_many():
global embed,g,session,messages,output
if g is None:
perform_graph_setup()
sentence = request.form['sentence']
sentences = request.form['sentences']
return sentence_similarity_api.fast_sentence_similarity_many(sentence, sentences, g, output, session, messages)
@app.route("/sentence_similarity", methods=['GET', 'POST'])
def sentence_similarity():
global embed,g,session,messages,output
if g is None:
perform_graph_setup()
sentence1 = request.form['sentence1']
sentence2 = request.form['sentence2']
return sentence_similarity_api.fast_sentence_similarity(sentence1, sentence2, g, output, session, messages)
@app.route('/sentence_similarity_bert', methods=['GET', 'POST'])
def sentence_similarity_bert():
sentence1 = request.form["sentence1"]
sentence2 = request.form["sentence2"]
semantic_similarity = bert_api.predict(sentence1, sentence2)
print('Sentence1: {}\n Sentence2: {}\nSimilarity: {}'.format(sentence1, sentence2, semantic_similarity['similarity']))
return jsonpickle.encode(semantic_similarity)
@app.route("/transcibe", methods=['GET', 'POST'])
def transcibe():
task_id = request.args.get('task_id')  # .get() returns None for a missing key, which the fallback below relies on
if task_id is None:
task_id = uuid.uuid4().hex
task_url = misc.get_task_url(task_id)
url = request.args.get("url")
if url is None:
print("URL not provided")
else:
task_url = url
language = request.args['language']
model = (request.args['model'] == 'True')
engine = request.args.get('engine')  # .get() returns None for a missing key, so the 'google' default below can apply
if engine is None:
engine = 'google'
print('Started: '+task_id)
conversation_blocks = analysis_api.transcribe_emotion(engine, task_id, language, model, loaded_model, pool, task_url, False)
print('Finished: '+task_id)
return jsonpickle.encode(conversation_blocks)
@app.route("/transcibe_emotion", methods=['GET', 'POST'])
def transcibe_emotion():
task_id = request.args.get('task_id')  # .get() so a missing task_id falls back to a generated uuid
if task_id is None:
task_id = uuid.uuid4().hex
task_url = misc.get_task_url(task_id)
url = request.args.get("url")
if url is None:
print("URL not provided")
else:
task_url = url
language = request.args['language']
model = (request.args['model'] == 'True')
engine = request.args.get('engine')  # .get() so the 'google' default below can apply
if engine is None:
engine = 'google'
conversation_blocks = analysis_api.transcribe_emotion(engine, task_id, language, model, loaded_model, pool, task_url)
return jsonpickle.encode(conversation_blocks)
@app.route("/emotion", methods=['GET', 'POST'])
def emotion():
task_id = request.args.get('task_id')  # .get() so a missing task_id falls back to a generated uuid
if task_id is None:
task_id = uuid.uuid4().hex
url = request.args.get("url")
if url is None:
print("URL not provided")
task_url = misc.get_task_url(task_id)
else:
task_url = url
global loaded_model
if loaded_model is None:
loaded_model = emotion_api.getModel()
loaded_model._make_predict_function()
emotion_blocks = analysis_api.emotion(task_url, task_id, loaded_model)
return jsonpickle.encode(emotion_blocks)
@app.route("/emotion_stream", methods=['GET', 'POST'])
def emotion_stream():
task_id = request.args.get("task_id")
url = request.args.get("url")
global loaded_model
if loaded_model is None:
loaded_model = emotion_api.getModel()
loaded_model._make_predict_function()
emotion_blocks = emotion_api.windowing_emotion(url, task_id, loaded_model)
return jsonpickle.encode(emotion_blocks)
@app.route("/chunks", methods=['GET', 'POST'])
def chunks():
page = request.args['page']
pagination = request.args['pagination']
chunks = chunk_api.fetch_chunks(page, pagination)
return jsonpickle.encode(chunks)
@app.route("/verify_chunk", methods=['GET', 'POST'])
def verify_chunk():
chunk_id = request.args['chunk_id']
is_verified = request.args['is_verified'].lower().startswith('t')
chunks = chunk_api.mark_chunk_as_verified(chunk_id, is_verified)
return jsonpickle.encode(chunks)
@app.route("/seen_chunk", methods=['GET', 'POST'])
def seen_chunk():
chunk_id = request.args['chunk_id']
chunks = chunk_api.mark_chunk_as_seen(chunk_id)
return jsonpickle.encode(chunks)
@app.route("/update_chunk_transcription", methods=['GET', 'POST'])
def update_chunk_transcription():
chunk_id = request.args['chunk_id']
transcript = request.args['transcript']
print("New: "+transcript)
chunks = chunk_api.update_chunk_transcription(chunk_id, transcript)
return jsonpickle.encode(chunks)
@app.route('/')
def send_base():
path = 'index.html'
return send_from_directory('static/', path)
@app.route('/verified_chunks', methods=['GET'])
def send_verified_static():
path = 'index2.html'
return send_from_directory('static/', path)
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path)
@app.route('/audio/<path:path>')
def send_static_audio(path):
return send_from_directory('/home/absin/Downloads/dataset/chunks', path)
@app.route('/api/snippets/<int:count>/<int:page>')
def serve_snippet(count, page):
data = chunk_api.fetch_verified_chunks(count, page)
return json.dumps({'response': data})
if __name__ == '__main__':
app.run(debug=True, threaded=True, host='0.0.0.0', port=5010)
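# ---------------------------------------------------------------------------
# Hedged usage sketch (an illustrative addition, not part of the original
# web.py): a minimal client for the /sentence_similarity endpoint defined
# above. It assumes the service runs locally on port 5010 (see app.run above)
# and that the `requests` package is installed; host, port and the example
# sentences are placeholders.
#
# import requests
# resp = requests.post(
#     "http://127.0.0.1:5010/sentence_similarity",
#     data={"sentence1": "How do I reset my password?",
#           "sentence2": "I forgot my password, what should I do?"})
# print(resp.text)  # jsonpickle-encoded similarity result from the endpoint
# ---------------------------------------------------------------------------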
| null |
web.py
|
web.py
|
py
| 7,018 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "redis.ConnectionPool",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tensorflow_hub.Module",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_default_graph",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.tables_initializer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "text.sentence_similarity.fast_sentence_similarity_many",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "text.sentence_similarity",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "text.sentence_similarity.fast_sentence_similarity",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "text.sentence_similarity",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "text.bert_mrpc.predict",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "text.bert_mrpc",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "speech.utils.misc.get_task_url",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "speech.utils.misc",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "speech.analysis.main.transcribe_emotion",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "speech.analysis.main",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "speech.utils.misc.get_task_url",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "speech.utils.misc",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "speech.analysis.main.transcribe_emotion",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "speech.analysis.main",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "speech.utils.misc.get_task_url",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "speech.utils.misc",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "speech.emotion.emotion_api.getModel",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "speech.emotion.emotion_api",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "speech.analysis.main.emotion",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "speech.analysis.main",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "speech.emotion.emotion_api.getModel",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "speech.emotion.emotion_api",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "speech.emotion.emotion_api.windowing_emotion",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "speech.emotion.emotion_api",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api.fetch_chunks",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api.mark_chunk_as_verified",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api.mark_chunk_as_seen",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api.update_chunk_transcription",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api.fetch_verified_chunks",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "speech.transcription.transfer_learning.chunk_data_api",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 186,
"usage_type": "call"
}
] |
359372365
|
###################################################
### ###
### Complete Experiment on Tasic data ###
### written by Bettina Mieth, Nico Görnitz, ###
### Marina Vidovic and Alex Gutteridge ###
### ###
###################################################
# Please change all directories to yours!
import sys
sys.path.append('/home/bmieth/scRNAseq/implementations')
import logging
logging.basicConfig()
from functools import partial
from experiments_utils import (method_sc3_ours, method_sc3_combined_ours, method_transfer_ours, acc_ari, acc_kta)
from nmf_clustering import NmfClustering_initW
from utils import *
import datetime
from simulation import split_source_target
import pandas as pd
import sys
import numpy as np
# Running times
now1 = datetime.datetime.now()
print("Current date and time:")
print(now1.strftime("%Y-%m-%d %H:%M"))
# Data location - Please change directories to yours!
fname_data = '/home/bmieth/scRNAseq/data/matrix'
fname_labels = '/home/bmieth/scRNAseq/data/cell_labels_primary_grouped'
fname_final = '/home/bmieth/scRNAseq/results/mouse_data/mouse_completeoverlap.npz'
# Parameters
reps = 100 # number of repetitions, 100
n_src = [1000] # number of source data points, 1000
percs_aim = [25, 50, 100, 200, 400, 650] # target sizes to use. (has to be greater than num_cluster!), [25, 50, 100, 200, 400, 650]
mixes = [0.0,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] # Mixture parameters of transfer learning SC3, [0.0,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# Pre-processing parameters for gene and cell filter
min_expr_genes = 2000
non_zero_threshold = 2
perc_consensus_genes = 0.94
preprocessing_first = True # Careful, for now this only supports True, within-filtering is not implemented
# Splitting mode defining how data is split in source and target set
splitting_mode = 2 # Split data in source and target randomly stratified (mode = 2, complete overlap) or one exclusive cluster for both target and source (the biggest ones) (mode = 4, non-complete overlap)
# NMF parameters
nmf_alpha = 10.0
nmf_l1 = 0.75
nmf_max_iter = 4000
nmf_rel_err = 1e-3
# List of accuracy functions to be used
acc_funcs = list()
acc_funcs.append(partial(acc_ari, use_strat=False))
acc_funcs.append(partial(acc_kta, mode=0))
# Read data
labels = np.loadtxt(fname_labels, delimiter='\t')
label_names, label_counts = np.unique(labels, return_counts = True)
print("Labels: ", label_names)
print("Counts: ", label_counts)
data = pd.read_csv(fname_data, sep='\t', header=None).values
print("Data dimensions before preprocessing: genes x cells", data.shape)
if preprocessing_first:
# Cell and gene filter and transformation before the whole procedure
cell_inds = sc.cell_filter(data, num_expr_genes=min_expr_genes, non_zero_threshold=non_zero_threshold)
data = data[:,cell_inds]
labels = labels[cell_inds]
gene_inds = sc.gene_filter(data, perc_consensus_genes=perc_consensus_genes, non_zero_threshold=non_zero_threshold)
data = data[gene_inds, :]
data = sc.data_transformation_log2(data)
# data is now filtered and transformed, don't do it again:
cell_filter_fun = partial(sc.cell_filter, num_expr_genes=0, non_zero_threshold=-1)
gene_filter_fun = partial(sc.gene_filter, perc_consensus_genes=1, non_zero_threshold=-1)
data_transf_fun = sc.no_data_transformation
print("data dimensions after preprocessing: genes x cells: ", data.shape)
print(data.shape)
else:
raise Warning("Within-Filtering is not implemented for R SC3")
# Cell and gene filter and transformation within the procedure
cell_filter_fun = partial(sc.cell_filter, num_expr_genes=min_expr_genes, non_zero_threshold=non_zero_threshold)
gene_filter_fun = partial(sc.gene_filter, perc_consensus_genes=perc_consensus_genes, non_zero_threshold=non_zero_threshold)
data_transf_fun = sc.data_transformation_log2
if len(np.unique(labels)) > np.min(percs_aim):
print("percs_aim need to be greater than num_cluster!")
sys.exit("error!")
# Specify dataset sizes
genes = data.shape[0] # number of genes
n_all = data.shape[1]
n_trg = n_all - n_src[0] # overall number of target data points
percs = np.true_divide(np.concatenate((percs_aim, [n_trg])), n_trg)  # append the full target size so the largest subsample uses all target cells
# List of methods to be applied
methods = list()
# original SC3 (SC3 on target data, TargetCluster)
methods.append(partial(method_sc3_ours))
# combined baseline SC3 (SC3 on combined source and target data, ConcatenateCluster)
methods.append(partial(method_sc3_combined_ours))
# transfer via mixing (Transfer learning via mixing source and target before SC3, TransferCluster)
# Experiment for all mixture_parameters
for m in mixes:
methods.append(partial(method_transfer_ours, mix=m, calc_transferability=False))
# Create results matrix
res = np.zeros((len(n_src), len(acc_funcs), reps, len(percs), len(methods)))
res_opt_mix_ind = np.zeros((len(n_src), reps, len(percs)))
res_opt_mix_aris = np.zeros((len(n_src), reps, len(percs)))
source_aris = np.zeros((len(n_src), reps))
source_ktas = np.zeros((len(n_src), reps))
# Prepare experiments
params = []
exp_counter = 1
num_exps = len(n_src) * reps * len(percs) * len(methods)
# Run experiments
for s in range(len(n_src)):
accs = np.zeros((len(acc_funcs), reps, len(percs), len(methods)))
accs_desc = list()
opt_mix_ind = np.zeros((reps, len(percs)))
opt_mix_aris = np.zeros((reps, len(percs)))
num_strat = np.zeros((reps, len(percs), len(methods)))
res_desc = []
r = 0
while r < reps:
# Split data in source and target randomly stratified (mode = 2) or with exclusive source and target clusters (mode = 4)
src, trg, src_labels, trg_labels = split_source_target(data, labels, mode=splitting_mode, target_ncells=n_trg, source_ncells=n_src[s])
trg_labels = np.array(trg_labels, dtype=int)
src_labels = np.array(src_labels, dtype=int)
# 3.a. Subsampling order for target
inds = np.random.permutation(trg_labels.size)
# 3.b. Use perfect number of latent states for nmf and sc3
src_lbl_set = np.unique(src_labels)
n_trg_cluster = np.unique(trg_labels).size
n_src_cluster = src_lbl_set.size
## 3.c. train source once per repetition
source_nmf = NmfClustering_initW(src, np.arange(src.shape[0]), num_cluster=n_src_cluster, labels=src_labels)
source_nmf.apply(k=n_src_cluster, alpha=nmf_alpha, l1=nmf_l1, max_iter=nmf_max_iter, rel_err=nmf_rel_err)
## Calculate ARIs and KTAs
source_aris[s, r] = metrics.adjusted_rand_score(src_labels[source_nmf.remain_cell_inds], source_nmf.cluster_labels)
print('ITER(', r+1, '): SOURCE ARI = ', source_aris[s,r])
# 3.d. Target data subsampling loop
print("Target data subsampling loop")
for i in range(len(percs)):
n_trg_perc = int(n_trg * percs[i] + 0.5)
p_trg = trg[:, inds[:n_trg_perc]].copy()
p_trg_labels = trg_labels[inds[:n_trg_perc]].copy()
# 4. MTL/DA mixing parameter loop
res_desc = list()
for m in range(len(methods)):
print(('Running experiment {0} of {1}: Train target data of repetition {2} - {3} source cells, {4} genes, {5} target cells and the {6}th method'.format(exp_counter, num_exps, r+1, n_src[s], genes, n_trg_perc, m+1)))
source_nmf.cell_filter_list = list()
source_nmf.gene_filter_list = list()
# source data is already filtered and transformed ...
source_nmf.add_cell_filter(lambda x: np.arange(x.shape[1]).tolist())
source_nmf.add_gene_filter(lambda x: np.arange(x.shape[0]).tolist())
source_nmf.set_data_transformation(lambda x: x)
# Run method
desc,target_nmf, data_for_SC3,trg_lbls_pred = methods[m](source_nmf, p_trg.copy(), num_cluster=n_trg_cluster)
res_desc.append(desc)
# Evaluate results
print("Evaluation of target results")
accs_desc = list()
if m >=2:
mixed_data, _, _ = target_nmf.get_mixed_data(mix=mixes[m-2], calc_transferability=False)
for f in range(len(acc_funcs)):
if f==0:
accs[f, r, i, m], accs_descr = acc_funcs[f]([], p_trg.copy(), p_trg_labels.copy(), trg_lbls_pred.copy())
elif m>=2:
accs[f, r, i, m], accs_descr = acc_funcs[f](target_nmf, data_for_SC3, p_trg_labels.copy(), trg_lbls_pred.copy())
else:
accs_descr = 'score not computed for baselines'
accs_desc.append(accs_descr)
print(('Accuracy: {0} ({1})'.format(accs[f, r, i, m], accs_descr)))
perc_done = round(np.true_divide(exp_counter, num_exps)*100, 4)
print(('{0}% of experiments done.'.format(perc_done)))
exp_counter += 1
# Identify optimal mixture parameter
opt_mix_ind[r, i] = np.argmax(accs[1, r, i, 2:])
opt_mix_aris[r, i] = accs[0, r, i, int(opt_mix_ind[r, i]+2)]
r += 1
# Save results
params.append((s))
res[s, :, :, :, :] = accs
res_opt_mix_ind[s,:,:] = opt_mix_ind
res_opt_mix_aris[s,:,:] = opt_mix_aris
# Save results
np.savez(fname_final, methods=methods, acc_funcs=acc_funcs, res=res, accs_desc=accs_desc,
method_desc=res_desc, source_aris=source_aris, min_expr_genes=min_expr_genes,
non_zero_threshold=non_zero_threshold, perc_consensus_genes=perc_consensus_genes, nmf_alpha=nmf_alpha, nmf_l1=nmf_l1, nmf_max_iter=nmf_max_iter, nmf_rel_err=nmf_rel_err, percs=percs, reps=reps, genes=genes, n_src=n_src, n_trg=n_trg, mixes=mixes, res_opt_mix_ind=res_opt_mix_ind, res_opt_mix_aris=res_opt_mix_aris)
# Show running times
now2 = datetime.datetime.now()
print("Current date and time:")
print(now2.strftime("%Y-%m-%d %H:%M"))
print("Time passed:")
print(now2-now1)
print('Done.')
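# ---------------------------------------------------------------------------
# Hedged post-hoc summary (an illustrative addition, not part of the original
# script): a quick sanity check of the collected results, printing the mean ARI
# (accuracy function 0) per method averaged over repetitions and target sizes.
# Index 0 on the first axis corresponds to the single n_src setting used above.
mean_ari_per_method = res[0, 0, :, :, :].mean(axis=(0, 1))
for m_idx, m_ari in enumerate(mean_ari_per_method):
    print('method {0}: mean ARI over reps and target sizes = {1:.3f}'.format(m_idx, m_ari))
# The saved results can later be reloaded with np.load(fname_final, allow_pickle=True)
# (allow_pickle is needed because the methods/acc_funcs entries are pickled partials).
# ---------------------------------------------------------------------------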
| null |
scripts/experiments/main_wrapper_tasic.py
|
main_wrapper_tasic.py
|
py
| 10,383 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "experiments_utils.acc_ari",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "functools.partial",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "experiments_utils.acc_kta",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "numpy.loadtxt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.true_divide",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "experiments_utils.method_sc3_ours",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "functools.partial",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "experiments_utils.method_sc3_combined_ours",
"line_number": 106,
"usage_type": "argument"
},
{
"api_name": "functools.partial",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "experiments_utils.method_transfer_ours",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "numpy.zeros",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "simulation.split_source_target",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.permutation",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "nmf_clustering.NmfClustering_initW",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.true_divide",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.savez",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 210,
"usage_type": "attribute"
}
] |
9629563
|
# coding: utf-8
#blog/views
from django.shortcuts import render
from blog.models import Post
#from django.views.generic import ListView, DetailView
def p_detail_view(request):
# v=Nor.objects.all() for the html2
# name = 'Form page'
# title = "Test Form"
# return render_to_response('test.html', {'name': name,'title':title})
# return render_to_response('layouts/index.html')
posts = Post.objects.all()
temp="blog/blog.html"
return render(request,temp, {
'posts': posts,
})
# class PostsListView(ListView): # list-style view
# model = Post # model for the view
# a=
# context = dict() #!
# context['articles']=Post.objects.all().order_by("-date")[:5]
# template_name="blog/blog.html"
# class PostDetailView(DetailView): # detail view of the model
# model = Post
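# ---------------------------------------------------------------------------
# Hedged companion sketch (an illustrative addition, not part of the original
# views.py): a minimal urls.py entry that would expose p_detail_view defined
# above. The URL pattern and name are assumptions, not taken from the project.
#
# from django.urls import path
# from blog.views import p_detail_view
#
# urlpatterns = [
#     path('blog/', p_detail_view, name='blog-list'),
# ]
# ---------------------------------------------------------------------------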
| null |
blog/views.py
|
views.py
|
py
| 948 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "blog.models.Post.objects.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "blog.models.Post.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Post",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 16,
"usage_type": "call"
}
] |
226358604
|
"""
Backups via mysqldump, mysqlbackup and extrabackup.
1. Wrap the three mainstream tools: mysqldump, mysqlbackup and extrabackup.
2. Given an instance as input, find all backup tools that can be used with it.
3. Backup scheduling.
"""
# (c) 2019, LeXing Jiang <[email protected] [email protected] https://www.sqlpy.com/>
# Copyright: (c) 2019, dbm Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import re
import psutil
import shutil
import logging
import subprocess
from datetime import datetime
from configparser import ConfigParser
from mysql import connector
from . import messages
from . import errors
from . import checkings
from . import common
from .dbmacnf import cnf
logger = logging.getLogger('dbm-agent').getChild(__name__)
class BaseBackup(object):
"""
Implements the various pieces of the whole backup workflow.
"""
args = []
logger = logging.getLogger("BaseBackup")
# used when invoking subprocess.run
stdout = subprocess.DEVNULL
stderr = subprocess.DEVNULL
def __init__(self, host="127.0.0.1", port=3306, user="mysqldumper", password=None):
"""
"""
logger = self.logger.getChild("__init__")
logger.debug(f"prepare backup mysqld{port}")
self.host = host
self.port = port
self.user = user
self.args = self.args.copy()
self.now = datetime.now()
if password is None:
# no password supplied, fall back to the default password from the dbm-agent config
self.password = cnf.init_pwd
else:
self.password = password
def pre_checks(self):
"""
Checks that must pass before a backup is attempted.
"""
logger = self.logger.getChild("pre_checks")
# 1. being able to connect as the given user is a precondition for any backup
cnx = None
try:
# 创建连接
cnx = connector.connect(
host=self.host, port=self.port, user=self.user, password=self.password)
cursor = cnx.cursor()
# 可以成功执行到这里就认为通过检测
cursor.execute("select @@port")
except Exception as err:
#
logger.error(
f"get error on connecto to host={self.host} port={self.port} user={self.user} password={self.password}")
raise errors.MySQLIsNotRunningError(str(err))
finally:
if hasattr(cnx, 'close'):
cnx.close()
def clear(self):
"""
Clean-up work to run after a backup completes.
Only the two most recent backup sets are kept.
"""
logger = self.logger.getChild("clear")
logger.info("start")
# collect all backup-set directories
backup_base_dir = f"/backup/mysql/backup/{self.port}"
dir_pattern = re.compile("[0-9]{4}-[0-9]{1,2}")
# isdir must be checked on the joined path; a bare listdir entry would be resolved relative to the CWD
dirs = [os.path.join(backup_base_dir, item) for item in os.listdir(
backup_base_dir) if os.path.isdir(os.path.join(backup_base_dir, item)) and dir_pattern.match(item)]
# build (creation time, directory) tuples
create_time_dirs = [(os.stat(item).st_ctime, item) for item in dirs]
if len(dirs) >= 3:
# 如果备份集目录超过三个、找到最老的那个
_, dir = min(create_time_dirs)
logger.info(f"prepare remove {dir}")
shutil.rmtree(dir)
logger.info(f"done remove {dir}")
logger.info("complete")
def save_binlog_position(self):
"""
Save the current binlog file and position to backup-sets/binlog-position.log.
"""
logger = self.logger.getChild("save_binlog_position")
cnx = None
try:
# 文件位置
backup_log = os.path.join(self.backup_sets, "binlog-position.log")
now = datetime.now()
# 位点信息
cnx = connector.connect(
host=self.host, port=self.port, user=self.user, password=self.password)
cursor = cnx.cursor()
cursor.execute("show master status ;")
file_name, position, *_ = cursor.fetchone()
# 保存到文件
with open(backup_log, 'a') as f:
f.write(f"{now.isoformat()} {file_name} {position} \n")
except Exception as err:
logger.error(
f"exception occur during save_binlog_position {str(err)}")
finally:
if hasattr(cnx, 'close'):
cnx.close()
@property
def backup_sets(self):
"""
Return the backup-set directory,
e.g. /backup/mysql/backup/3306/2020-4
"""
logger = self.logger.getChild("backup_sets")
year, week, *_ = self.now.isocalendar()
sts = os.path.join(
f"/backup/mysql/backup/{self.port}/", f"{year}-{week}")
logger.debug(f"backup-sets = {sts}")
return sts
def backup(self):
"""
Run the backup.
"""
logger = self.logger.getChild("backup")
# 如果前置检查都过不了,那么就不用备份了
try:
self.pre_checks()
except errors.Error as err:
logger.error(f"backup opration fail because : {err}")
return None
# 如果可以执行到这里,说明基本可以备份了(无法排除权限问题)
# 完成 args 参数的配置
logger.info("prepare all setup function")
self.setup()
# 打印一下自动化配置下的参数
logger.info(f"args like this {self.args}")
# 创建备份集目录
sts = self.backup_sets
if not os.path.isdir(sts):
logger.warn(
f"backupset dir is not exists we well create it. {sts}")
# os.mkdir(sts)
os.makedirs(sts)
# 保存一下 binlog 的位点信息(方便在做时点还原的时候大致知道从哪个文件开始)
self.save_binlog_position()
# 在子进程中备份数据库
try:
logger.info("prepare execute mysqlbackup commond")
subprocess.run(
self.args, check=True, stderr=self.stderr, stdout=self.stdout)
# 备份完成之后改一下文件的权限
# 本来是要用 mysql{self.port} 这个用户备份数据库的,但是遇到 linux 上的一个错误目前还没有解决!!!
common.recursive_change_owner(sts, f"mysql{self.port}", "mysql")
logger.info("backup complete")
except subprocess.CalledProcessError as err:
logger.error(str(err))
except subprocess.TimeoutExpired as err:
logger.error(str(err))
finally:
# 资源回收
if hasattr(self.stdout, 'closed') and self.stdout.closed == False:
self.stdout.close()
if hasattr(self.stderr, 'closed') and self.stderr.closed == False:
self.stderr.close()
# 临时文件清理
self.clear()
class MySQLDumpMixin(object):
"""
Basic mysqldump-related operations, such as finding where the mysqldump command lives.
"""
logger = logger.getChild("MySQLDumpMixin")
# args will later be passed to subprocess.run
args = []
defautls = {
'triggers': '',
'routines': '',
'events': '',
'compress': '',
'all-databases': '',
'default-character-set': 'binary',
'delete-master-logs': 'OFF',
# 'flush-logs': '',
'flush-privileges': '',
'master-data': 2,
'single-transaction': 'ON',
'max-allowed-packet': '128M',
'dump-date': '',
}
def get_mysqldump_cmd(self):
"""
Look up the absolute path of mysqldump.
"""
logger = self.logger.getChild("get_mysqldump_cmd")
cnx = None
try:
cnx = connector.connect(
host=self.host, port=self.port, user=self.user, password=self.password)
cursor = cnx.cursor()
cursor.execute("select @@basedir;")
# 查询出 basedir 是多少
basedir, *_ = cursor.fetchone()
cmd = os.path.join(basedir, 'bin/mysqldump')
logger.info(messages.USING_XX_AS_BACKUP_TOOL.format(cmd))
return cmd
except Exception as err:
logger.warn(str(err))
finally:
if hasattr(cnx, 'close'):
cnx.close()
def get_defaults(self, kwargs=None):
"""
Return the options that can already be fixed at the MySQLDumpMixin level.
"""
logger = self.logger.getChild("get_defaults")
# 复制默认值
if kwargs is None:
kwargs = {}
options = self.defautls.copy()
options.update({'user': self.user, 'password': self.password,
'host': self.host, 'port': self.port})
options.update(kwargs)
logger.debug(f"options like this {options}")
return options
def setup(self):
"""
Assemble the basic mysqldump arguments.
"""
logger = self.logger.getChild("setup")
cmd = self.get_mysqldump_cmd()
logger.info(f"using {cmd} to backup")
self.args = [cmd, ]
for k, v in self.get_defaults().items():
if v != '':
self.args.append(f'--{k}={v}')
else:
self.args.append(f'--{k}')
# 设置stdout 和 stderr
self.stdout = subprocess.DEVNULL
self.stderr = subprocess.DEVNULL
class MySQLBackupMixin(object):
"""
Basic mysqlbackup-related operations.
"""
logger = logger.getChild("MySQLBackupMixin")
args = []
defaults = {
'on-disk-full': 'abort_and_remove',
'compress': '',
'skip-binlog': '',
'skip-relaylog': '',
'progress-interval': 1,
'process-threads': 1,
'read-threads': 1,
'write-threads': 1,
'limit-memory': 128,
}
def get_mysqlbackup_cmd(self):
"""
Return the mysqlbackup command matching the server version.
"""
logger = self.logger.getChild("get_mysqlbackup_cmd")
cnx = None
try:
# 查询出 mysql 的版本号
cnx = connector.connect(
host=self.host, port=self.port, user=self.user, password=self.password)
cursor = cnx.cursor()
cursor.execute("select @@version;")
version, *_ = cursor.fetchone()
# 拼接出对应版本的 meb 工具
mysqlbackup = f"/usr/local/mysql-commercial-backup-{version}-linux-glibc2.12-x86_64/bin/mysqlbackup"
if os.path.isfile(mysqlbackup):
# 如果存在就返回
return mysqlbackup
mysqlbackup = f"/usr/local/meb/bin/mysqlbackup"
if os.path.isfile(mysqlbackup):
# 在 /usr/local/meb/bin 下可以找到能用的也行
return mysqlbackup
# 如果执行到这里,说明以上两个地方都找不到就返回 None
return None
except Exception as err:
logger.error(str(err))
return None
finally:
if hasattr(cnx, 'close'):
cnx.close()
def get_defaults(self, kwargs=None):
"""
Build and return the option dictionary for mysqlbackup.
"""
if kwargs is None:
kwargs = {}
options = self.defaults.copy()
options.update(kwargs)
options.update({'user': self.user, 'password': self.password,
'host': self.host, 'port': self.port})
# 备份进度
sts = self.backup_sets
progress_file = os.path.join(f"file:{sts}", "mysqlbackup-progress.log")
options.update({'show-progress': progress_file})
logger.debug(f"options like this {options}")
# 读并发
cpu = psutil.cpu_count()
read_threads = int(cpu * 0.1) + 1
process_threads = int(cpu * 0.2) + 2
write_threads = int(cpu * 0.1) + 1
options.update({
'read-threads': read_threads,
'write-threads': write_threads,
'process-threads': process_threads,
})
# size --limit-memory from the memory currently available on the host;
# thresholds are checked from largest to smallest so each tier is actually reachable
available = psutil.virtual_memory().available / 1024 / 1024
limit_memory = 128
if available >= 16384:    # 16G
limit_memory = 6144
elif available >= 12288:  # 12G
limit_memory = 4096
elif available >= 10240:  # 10G
limit_memory = 3072
elif available >= 6144:   # 6G
limit_memory = 2048
elif available >= 4096:   # 4G
limit_memory = 1152
elif available >= 3072:   # 3G
limit_memory = 1024
elif available >= 2048:   # 2G
limit_memory = 512
elif available >= 1024:   # 1G
limit_memory = 256
options.update({'limit-memory': limit_memory})
return options
def setup(self):
"""
"""
logger = self.logger.getChild("setup")
# 解析出命令
cmd = self.get_mysqlbackup_cmd()
logger.info(f"using {cmd} to backup")
self.args = [cmd, ]
# 创建临时目录
sts = self.backup_sets
backup_dir = os.path.join(
sts, f"{self.now.isoformat()[:19]}-temp")
if not os.path.isdir(backup_dir):
# 如果临时目录还没有那么就创建出来
os.makedirs(backup_dir)
# 把 self.stderr 设置上
# backup 函数会对这个做 close
stderr = os.path.join(sts, f"{self.now.isoformat()}.log")
self.stderr = open(stderr, 'w')
# 把属性添加到 self 方便后面使用
self.backup_dir = backup_dir
logger.info(f"set backup-dir to {backup_dir}")
#
for k, v in self.get_defaults(kwargs={'backup-dir': backup_dir}).items():
if v != '':
self.args.append(f'--{k}={v}')
else:
self.args.append(f'--{k}')
# 添加 backup-to-image
self.args.append('backup-to-image')
class XtraBackupMixin(object):
"""
"""
logger = logger.getChild("XtraBackupMixin")
args = []
defaults = {
'compress': '',
}
class MySQLBackupFullBackupMixin(MySQLBackupMixin):
"""
Logic for full backups with mysqlbackup.
"""
logger = logger.getChild("MySQLBackupFullBackupMixin")
def get_defaults(self, kwargs=None):
"""
"""
logger = self.logger.getChild("get_defaults")
if kwargs is None:
kwargs = {}
# 把 backup-image 加上就可以了
sts = self.backup_sets
backup_image = os.path.join(
sts, f"{self.now.isoformat()}-full-backup.mbi")
kwargs.update({'backup-image': backup_image})
#logger.info(f"kwargs = {kwargs}")
options = MySQLBackupMixin.get_defaults(self, kwargs)
logger.info(options)
return options
class MySQLBackupDiffBackupMixin(MySQLBackupMixin):
"""
"""
logger = logger.getChild("MySQLBackupDiffBackupMixin")
def get_defaults(self, kwargs=None):
"""
"""
logger = self.logger.getChild("get_defaults")
if kwargs is None:
kwargs = {}
# 设置 backup-image
sts = self.backup_sets
backup_image = os.path.join(
sts, f"{self.now.isoformat()}-diff-backup.mbi")
kwargs.update({'backup-image': backup_image})
# 设置 --increament
kwargs.update(
{'incremental': '', 'incremental-base': 'history:last_full_backup'})
options = MySQLBackupMixin.get_defaults(self, kwargs)
# logger.info(options)
return options
class MySQLDumpFullBackupMixin(MySQLDumpMixin):
"""
"""
logger = logger.getChild("MySQLDumpFullBackupMixin")
def get_defaults(self, kwargs=None):
"""
"""
if kwargs is None:
kwargs = {}
sts = self.backup_sets
backup_file = os.path.join(
sts, f'{self.now.isoformat()}-full-backup.sql')
kwargs.update({'result-file': backup_file})
return MySQLDumpMixin.get_defaults(self, kwargs)
class MySQLDumpNoDataMixin(MySQLDumpMixin):
"""
Logic for dumping the schema only (no data).
"""
logger = logger.getChild("MySQLDumpNoDataMixin")
def get_defaults(self, kwargs=None):
"""
"""
if kwargs is None:
kwargs = {}
options = MySQLDumpMixin.get_defaults(self)
del options['flush-privileges']
del options['master-data']
del options['single-transaction']
sts = self.backup_sets
options['result-file'] = os.path.join(sts,
f'{self.now.isoformat()}-only-schema.sql')
options['no-data'] = ''
return options
# mysqldump supports two backup modes: "full" and "schema definition only (only-schema)"
class MySQLDumpFullBackup(MySQLDumpFullBackupMixin, BaseBackup):
"""
All the logic for a full backup via mysqldump.
"""
pass
class MySQLDumpNoDataBackup(MySQLDumpNoDataMixin, BaseBackup):
"""
Logic for exporting only the schema via mysqldump.
"""
pass
# mysqlbackup supports two backup modes: "full" and "differential"
class MySQLBackupFullBackup(MySQLBackupFullBackupMixin, BaseBackup):
"""
Full backup based on mysqlbackup.
"""
pass
class MySQLBackupDiffBackup(MySQLBackupDiffBackupMixin, BaseBackup):
"""
Differential backup logic based on mysqlbackup.
"""
def usable_backup_tools(host="127.0.0.1", port=3306, user="mysqldump", password="dbma@0352"):
"""
Work out all backup tools that can be used with the current instance.
"""
lgr = logger.getChild("usable_backup_tools")
# 连接上实例,用于确认版本号和 basedir
cnx = None
tools = []
try:
lgr.info(
f"prepare connect to host={host} port={port} user={user} password={password}")
cnx = connector.connect(host=host, port=port,
user=user, password=password)
cursor = cnx.cursor()
cursor.execute("select @@version,@@basedir")
version, basedir = cursor.fetchone()
lgr.info(f"version = {version}")
lgr.info(f"basdir = {basedir}")
# 查询对应版本的 meb 是否存在
mysqlbacup = f"/usr/local/mysql-commercial-backup-{version}-linux-glibc2.12-x86_64/bin/mysqlbackup"
if os.path.isfile(mysqlbacup):
lgr.info("mysqlbackup exists")
tools.append("mysqlbackup")
# 查询 mysqldump 是否存在
mysqldump = os.path.join(basedir, "bin/mysqldump")
if os.path.isfile(mysqldump):
lgr.info("mysqldump exists")
tools.append("mysqldump")
except Exception as err:
lgr.error(f"{str(err)}")
finally:
if hasattr(cnx, 'close'):
cnx.close()
lgr.info(tools)
return tools
def get_current_backup_sets(port=3306):
"""Return the backup-set directory the given instance should currently use.
"""
year, week, *_ = datetime.now().isocalendar()
sts = os.path.join(
f"/backup/mysql/backup/{port}/", f"{year}-{week}")
return sts
class BackupChecker(object):
"""Check whether a backup exists and whether it succeeded.
"""
logger = logger.getChild("BackupChecker")
def __init__(self, port=3306):
"""
"""
logger = self.logger.getChild("__init__")
logger.info("start")
self.port = port
# 当前时间的前缀 2020-02-22
self.todaystr = datetime.now().isoformat()[:10]
year, week, *_ = datetime.now().isocalendar()
self.backup_set_dir = os.path.join(
f"/backup/mysql/backup/{port}/", f"{year}-{week}")
logger.info("complete")
@property
def has_backup_set(self):
"""Check whether a backup set exists for this instance.
"""
logger = self.logger.getChild("has_backup_set")
logger.info("start")
# 如果备份集的目录不存在那么就是没有
if not os.path.exists(self.backup_set_dir):
logger.info("complete")
return False
else:
logger.info("complete")
return True
@property
def has_mbi_full_backup(self):
"""Check whether a full backup made by mysqlbackup exists.
"""
logger = self.logger.getChild("has_mbi_full_backup")
logger.info("start")
#
if self.has_backup_set == False:
# 备份集都没有就不可能有基于 mysqlbackup 的全备文件
logger.debug("backup set not exists")
logger.info("complete")
return False
# 执行到这里说明备份集存在于是检查是否有全备文件
backups = [backup for backup in os.listdir(
self.backup_set_dir) if backup.endswith("full-backup.mbi")]
logger.info(f"{backups}")
if len(backups) == 0:
# 空列表说明没有全备
logger.info("complete")
return False
# 执行到这里说明 全备文件是存在的,那么就要检查它是否成功了
* _, lastbackup = backups
logfile = lastbackup[:26] + '.log'
log_file_path = os.path.join(self.backup_set_dir, logfile)
# 如果文件这个时候被删除了,那么就有可能不存在,所以先检查一下
if not os.path.isfile(log_file_path):
logger.warning(f"log file '{log_file_path}' not exists")
logger.info("complete")
return False
# 执行到这里说明文件是有的
log_file_size = os.stat(log_file_path).st_size
logger.info(f"prepare open {log_file_path}")
with open(log_file_path) as logfile:
# 指向最后 100 个字节
logfile.seek(log_file_size - 100)
for line in logfile:
# 能找到成功的标志就说明成功了,不然说说明没有成功
if 'mysqlbackup completed OK!' in line:
logger.info("complete")
return True
logger.info("complete")
return False
@property
def has_mbi_diff_backup(self):
"""Check whether the differential (incremental) backup succeeded.
"""
logger = self.logger.getChild("has_mbi_diff_backup")
logger.info("start")
# 如果备份集都没有那么就认为 diff 备份也没有
if self.has_backup_set == False:
logger.warning("backup set dir not exists")
logger.info("complete")
return False
# 执行到这里说明备份集是有的,那么准备找今天有没有 diff 备份,full 也看成是 diff
backups = [backup for backup in os.listdir(self.backup_set_dir) if backup.startswith(
self.todaystr) and backup.endswith(".log")]
if len(backups) == 0:
#
logger.warning("cant find any backup file ")
logger.info("complete")
return False
# 执行到这里说明是有日志文件的、那么就看一下最后一个备份日志文件中有没有记录成功
* _, logfile = backups
log_file_path = os.path.join(self.backup_set_dir, logfile)
log_file_size = os.stat(log_file_path).st_size
with open(log_file_path) as logfile:
#
logger.info("")
logfile.seek(log_file_size - 100)
for line in logfile:
# 如果有成功的标示就说明成功了
if 'mysqlbackup completed OK!' in line:
logger.info("complete")
return True
logger.info("complete")
return False
@property
def has_sql_backup(self):
"""Check whether today's mysqldump backup succeeded.
"""
logger = self.logger.getChild("has_sql_backup")
logger.info("start")
if self.has_backup_set == False:
logger.warning("backup set not exits")
logger.info("complete")
return False
#
backups = [backup for backup in os.listdir(
self.backup_set_dir) if backup.endswith("full-backup.sql")]
# 检查备份集是否存在
if len(backups) == 0:
#
logger.warning("backup set is empty")
logger.info("complete")
return False
#
* _, lastbackup = backups
last_backup_file_path = os.path.join(self.backup_set_dir, lastbackup)
last_backup_file_size = os.stat(last_backup_file_path).st_size
with open(last_backup_file_path) as backupfile:
backupfile.seek(last_backup_file_size - 100)
for line in backupfile:
if '-- Dump completed on' in line:
#
logger.info(
f"has available backup '{last_backup_file_path}' ")
logger.info("complete")
return True
logger.info("complete")
return False
def today_has_backup(port=3306):
"""
Check whether a backup has already been taken today.
"""
lgr = logger.getChild("today_has_backup")
today = datetime.now().isoformat()[:10]
year, week, *_ = datetime.now().isocalendar()
sts = os.path.join(
f"/backup/mysql/backup/{port}/", f"{year}-{week}")
if not os.path.isdir(sts):
# 如果备份集都没有,那么是一定没有备份的
lgr.info("backup sets not exists.")
return False
# 执行到这里说明有备份集
backups = []
for backup in os.listdir(sts):
if backup.startswith(today):
backups.append(backup)
# 如果列表的长度是空的那么今天定是没有备份的
if len(backups) == 0:
# 说明今天还没有备份
lgr.info("backup file not exists")
return False
# 执行到这里说明是有备份的,那么就要检查备份是否成功了
backups = [backup for backup in backups if backup.endswith(
'.log') or backup.endswith('sql')]
* _, backup = backups
# 取得最后的一个备份文件
lgr.info("validate backup file")
file_abs_path = os.path.join(sts, backup)
file_size = os.stat(file_abs_path).st_size
with open(file_abs_path) as f:
f.seek(file_size - 100)
for line in f:
if "mysqlbackup completed OK!" in line or "-- Dump completed on" in line:
lgr.info("backup file valid")
return True
lgr.info("backup file validate fail we need a new one")
return False
def has_full_backup(port=3306):
"""Check whether a usable full backup exists.
"""
lgr = logger.getChild("has_full_backup")
lgr.info("start")
# 如果备份集的目录不存在那么一定是没有的
today = datetime.now().isoformat()[:10]
year, week, *_ = datetime.now().isocalendar()
sts = os.path.join(
f"/backup/mysql/backup/{port}/", f"{year}-{week}")
if not os.path.isdir(sts):
# 如果备份集都没有,那么是一定没有备份的
lgr.info("backup sets not exists.")
return False
# 目录已经存在就要检查全备是否成功
# 使用 mysqldump 的情况
lgr.debug("prepare checking has *full-backup.sql file or not")
mysqldumps = [dump for dump in os.listdir(
sts) if dump.endswith("full-backup.sql")]
lgr.debug(f"find dump files {mysqldumps}")
if len(mysqldumps) == 0:
lgr.debug("full backup not exists")
return False
# a *full-backup.sql dump was found, treat it as a usable full backup
return True
def clean_backup_sets(port=3306, sets_count=2):
"""
Clean up backup sets (at most one set is removed per call).
"""
logger.debug("checking whether any backup set can be removed")
backups = []
backup_base_dir = f"/backup/mysql/backup/{port}"
pattern = r"\d{4}-\d{1,2}"
for sets in os.listdir(backup_base_dir):
sets_dir = os.path.join(backup_base_dir, sets)
if os.path.isdir(sets_dir) and re.match(pattern, sets):
backups.append(sets_dir)
if len(backups) > sets_count:
# sort by creation time so the first entry really is the oldest set
backups.sort(key=lambda d: os.stat(d).st_ctime)
logger.info(f"current backup sets {backups}")
oldest, *_ = backups
logger.info(f"remove backup set {oldest}")
# remove the whole backup-set directory tree
shutil.rmtree(oldest)
def backup_binlog(port=3306):
"""
"""
pass
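# ---------------------------------------------------------------------------
# Hedged usage sketch (an illustrative addition, not part of the original
# dbm-agent module): how the pieces above are meant to fit together. Probe the
# instance for usable tools, prefer mysqlbackup when it is installed and fall
# back to mysqldump otherwise. Host and credentials are placeholders.
if __name__ == "__main__":
    tools = usable_backup_tools(host="127.0.0.1", port=3306,
                                user="mysqldumper", password="change-me")
    if "mysqlbackup" in tools:
        bkp = MySQLBackupFullBackup(host="127.0.0.1", port=3306,
                                    user="mysqldumper", password="change-me")
    elif "mysqldump" in tools:
        bkp = MySQLDumpFullBackup(host="127.0.0.1", port=3306,
                                  user="mysqldumper", password="change-me")
    else:
        bkp = None
    if bkp is not None:
        bkp.backup()
# ---------------------------------------------------------------------------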
| null |
dbma/backup.py
|
backup.py
|
py
| 28,568 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "dbmacnf.cnf.init_pwd",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "dbmacnf.cnf",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "mysql.connector.connect",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "mysql.connector.connect",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "subprocess.TimeoutExpired",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connect",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connect",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "psutil.cpu_count",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 433,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 443,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 513,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 540,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 564,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connect",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 614,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 630,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 630,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 631,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 648,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 649,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 667,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 668,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 668,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 669,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 669,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 682,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 706,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 719,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 719,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 722,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 769,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 769,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 801,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 814,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 814,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 815,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 838,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 838,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 839,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 839,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 840,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 842,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 867,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 867,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 887,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 887,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 888,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 888,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 889,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 889,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 891,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 891,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 899,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 916,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 918,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 918,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 919,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 919,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 919,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 920,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 920,
"usage_type": "attribute"
},
{
"api_name": "os.removedirs",
"line_number": 926,
"usage_type": "call"
}
] |
214504290
|
from django.shortcuts import render, redirect
from .models import Review
from .forms import ReviewForm
from django.contrib.auth.decorators import login_required
@login_required
def list_reviews(request):
reviews = Review.objects.all()
return render(request, 'reviews/list.html', {'reviews': reviews})
@login_required
def create_review(request):
form = ReviewForm(request.POST or None)
if form.is_valid():
review = form.save(commit=False)
review.author = request.user
review.save()
return redirect('list_reviews')
return render(request, 'reviews/form.html', {'form': form})
@login_required
def update_review(request, id):
review = Review.objects.get(id=id)
form = ReviewForm(request.POST or None, instance=review)
if form.is_valid():
form.save()
return redirect('list_reviews')
return render(request, 'reviews/form.html', {'form': form, 'review': review})
@login_required
def delete_review(request, id):
review = Review.objects.get(id=id)
if request.method == 'POST':
review.delete()
return redirect('list_reviews')
return render(request, 'review/delete-confirm.html', {'review': review})
| null |
src/reviews/views.py
|
views.py
|
py
| 1,217 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Review.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Review.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.Review",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "forms.ReviewForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.Review.objects.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.Review.objects",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "models.Review",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "forms.ReviewForm",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.Review.objects.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Review.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "models.Review",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 38,
"usage_type": "name"
}
] |
287842692
|
import os
from setuptools import find_packages, setup
from auth_updater import __VERSION__
install_requires = [
#locked
'django>=3.1.1',
'django-celery-beat',
'django-registration',
'celery>=5.0.0rc3',
'gunicorn',
'mysqlclient',
'dnspython',
'passlib',
'requests',
'bcrypt',
'python-slugify',
'requests-oauthlib',
'semantic_version',
'packaging',
'redis',
'celery_once',
'django-bootstrap-form',
'django-sortedm2m',
'django-redis-cache',
'django-esi',
'bravado']
testing_extras = [
'coverage>=4.3.1',
'requests-mock>=1.2.0',
'django-nose',
'django-webtest',
]
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='allianceauth-updater',
version=__VERSION__,
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'testing': testing_extras
},
license='MIT',
description='Alliance Auth Dependency update helper',
long_description=long_description,
url="https://github.com/pvyParts/allianceauth-updater",
long_description_content_type='text/markdown',
author='AaronKable',
author_email='[email protected]',
)
| null |
setup.py
|
setup.py
|
py
| 1,443 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.pardir",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "auth_updater.__VERSION__",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "setuptools.find_packages",
"line_number": 46,
"usage_type": "call"
}
] |
500254481
|
"""Functions to manage os files and Path objects.
Intended to be used within a Python 3 environment.
Developed by Rodrigo Rivero.
https://github.com/rodrigo1392
"""
# Flexibility for python 2.x
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from pathlib import Path
except ImportError:
pass
import ast
import os
import shutil
from . import strings_tools as st
def create_non_existent_folder(folder_path):
"""Create a folder if it does not exist yet.
Parameters
----------
folder_path : Path
Path of folder to verify/create.
Returns
-------
Path
Path of verified/created folder.
"""
# Normalize input path and attempt to create folder. If it already
# exists, do nothing.
folder_path = Path(folder_path)
try:
folder_path.mkdir()
print(str(folder_path), 'folder created')
return folder_path
except FileExistsError:
return folder_path
def extract_config_from_cfg(cfg_path):
"""Extract input data from *.cfg file.
Parameters
----------
cfg_path : Path
Config file to read from.
Returns
-------
dict
Config keywords: python objects pairs.
"""
# Start parser engine and read cfg file.
cfg = configparser.ConfigParser()
cfg.read(cfg_path)
# Gather all input variables and merge them in one dict.
input_data = ({k.lower(): v for k, v in cfg.items(i)}
for i in cfg.sections())
config_dict = {}
for i in input_data:
config_dict.update(i)
# Try to convert variables to Python objects.
output_data = {}
for k, value in config_dict.items():
try:
output_data[k] = ast.literal_eval(value)
except SyntaxError:
output_data[k] = value
except TypeError:
output_data[k] = value
return output_data
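# Illustrative example (hypothetical 'run.cfg', not part of the original module):
#   [run]
#   n_samples = 100
#   label = 'test'
# extract_config_from_cfg('run.cfg') would return {'n_samples': 100, 'label': 'test'}.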
def find_file(root_path, searched_file, recursively=False):
"""Find instances of file, searching in a folder system.
Parameters
----------
root_path : Path
Top level folder, start search here.
searched_file : str
Name of searched file.
recursively : bool, optional
If True, search folders recursively. Default is False.
Returns
-------
list
Full Paths of `searched_file` instances.
False
If file is not found.
"""
# List all files in root and extract searched file path.
paths_list = list_files(root_path, True, recursively)
file_path = [i for i in paths_list if str(i.name) == searched_file]
if not file_path:
print('File not found')
return False
return file_path
def generate_folder_walker(root_path, level=1):
"""Create generator that walks a folder recursively.
Parameters
----------
root_path : Path
Top level folder, start search here.
level : int
Amount of levels to walk for.
Yields
------
Path
Paths of accessed folders.
"""
root_path = root_path.rstrip(os.path.sep)
assert os.path.isdir(root_path)
num_sep = root_path.count(os.path.sep)
for root, dirs, files in os.walk(root_path):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
def list_files(root_path, full_path=True, recursively=True):
"""List all files paths in a folder.
Parameters
----------
root_path : Path
Top level folder, start search here.
full_path : bool, optional
If True, return full paths of files instead of just file names.
recursively : bool, optional
If True, search folders recursively. Default is False.
Returns
-------
List
Paths of all existing files.
"""
root_path = Path(root_path)
# List files with or without recursion.
if recursively:
paths_list = [f for f in root_path.rglob("*") if f.is_file()]
else:
paths_list = [f for f in root_path.iterdir() if f.is_file()]
if not full_path:
paths_list = [f.name for f in paths_list]
# Try to sort files by digits
try:
sorted_list_as_strings = st.sort_strings_by_digit(paths_list)
return [Path(i) for i in sorted_list_as_strings]
except IndexError:
return paths_list
def list_files_with_extension(root_path, extension, full_path=True,
recursively=True):
"""List all files paths in a folder, filtered by a given suffix.
Parameters
----------
root_path : Path
Top level folder, start search here.
extension : str
Extension of files to list.
full_path : bool, optional
If True, return full paths of files instead of just file names.
recursively : bool, optional
If True, search folders recursively. Default is True.
Returns
-------
List
Paths of filtered files.
"""
# List all files in root and filter them by extension.
paths_list = list_files(root_path, full_path, recursively)
paths_list = [i for i in paths_list if
i.suffix == '.' + extension.replace('.', '')]
return paths_list
def list_files_with_substring(root_path, input_string, full_path=True,
recursively=True):
"""List all files paths in a folder, filtered by given substring.
Parameters
----------
root_path : Path
Top level folder, start search here.
input_string : str
String to filter paths by.
full_path : bool, optional
If True, return full paths of files instead of just file names.
recursively : bool, optional
If True, search folders recursively. Default is True.
Returns
-------
List
Paths of filtered files.
"""
# List all files in root and filter them by substring.
paths_list = list_files(root_path, full_path, recursively)
paths_list = [i for i in paths_list if input_string in i.name]
return paths_list
def manage_old_version_file(file_path):
"""Avoid file overwriting, managing 'old' version of it.
Algorithm looks for an 'old_' version of `file_path` in the same
directory. If it finds one, it copies it back over `file_path`; if not,
it creates a copy of `file_path` as the 'old_' version, setting it as
the new backup file. This keeps a backup of `file_path` available.
Parameters
----------
file_path : Path
Path of file of interest.
Returns
-------
Path
Path of file that can be safely modified.
"""
# Set old version file path
file_path = Path(file_path)
old_version_file = modify_filename_in_path(file_path,
added='old_',
prefix=True)
# If old version exists, create a copy without prefix and return
# that path. If not, create a copy with prefix and set it as the
# new backup file.
if old_version_file.exists():
shutil.copy(str(old_version_file), str(file_path))
output = file_path
elif Path(file_path).exists():
shutil.copy(file_path, old_version_file)
output = file_path
# Report if no file was found
else:
print(Path(file_path).name, 'FILE NOT FOUND IN', str(file_path.parent))
output = None
return output
def modify_filename_in_path(file_path, new_name=None, added=None,
prefix=False):
"""Modify file name of a given full path.
The algorithm considers three types of modifications:
- Full file name replace.
- Adding of prefix or suffix to file name.
- Combination of former two.
Parameters
----------
file_path : Path
Full Path of file name to be modified.
new_name : str, optional
If given, replace file name with it.
added : str, optional
If given, add it to the file name as a prefix or suffix.
prefix : bool, optional
If True, add `added` as prefix. Otherwise, add it as suffix.
Returns
-------
Path
Modified full Path.
"""
# Normalize input to Path object and build new file name.
file_path = Path(file_path)
if new_name is None:
new_name = file_path.stem
if added is not None:
if prefix:
new_name = added + new_name
else:
new_name = new_name + added
output = Path(file_path.parent, new_name).with_suffix(file_path.suffix)
return output
def renumber_file(file_path, delta):
"""Modify a file name adding a number to the last digit found in it.
Parameters
----------
file_path : Path
Path of file to be renamed.
delta : int
Number to be added to the last digit found in the file name.
Returns
-------
Path
Full Path of new file name.
"""
# Normalize input to Path object and extract original number
file_path = Path(file_path)
number = st.extract_number_from_str(file_path)
# Rename file
output_path = str(file_path).replace(str(number), str(number + delta))
file_path.rename(output_path)
return output_path
def save_files_list_2txt(root_path, txt_path=None, full_path=False,
recursively=False):
"""Save all files paths in a folder to a txt file.
Parameters
----------
root_path : Path
Top level folder, start search here.
txt_path : Path, optional
Path of txt file. If not defined, create it in current folder.
full_path : bool, optional
If True, return full paths of files instead of just file names.
recursively : bool, optional
If True, search folders recursively. Default is False.
Returns
-------
Path
Path of txt file.
"""
# Set default output file and normalize input suffix
if not txt_path:
txt_path = Path(root_path, 'files_list')
txt_path = txt_path.with_suffix('.txt')
# List all files in root and save to output txt
paths_list = list_files(root_path, full_path, recursively)
with open(txt_path, 'w+') as file_out:
file_out.write('\n'.join(paths_list))
return txt_path
def size_folder(root_path=None, recursively=True):
"""Calculate the size of a given folder in MB.
Parameters
----------
root_path : Path, optional
Folder to find the size of. If None given, size current folder.
recursively : bool, optional
If True, consider sub-folders recursively. Default is True.
Returns
-------
float
Size of folder in MB.
"""
# List all files in root folder and sum their stats
if root_path is None:
root_path = Path.cwd()
paths_list = list_files(root_path, True, recursively)
return round(sum(f.stat().st_size for f in paths_list) / (1024*1024), 3)
| null |
pymiscell/filesystem_tools.py
|
filesystem_tools.py
|
py
| 10,920 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ConfigParser.ConfigParser",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 379,
"usage_type": "name"
}
] |
27765681
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 14:50:17 2018
@author: Bea
"""
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np; np.random.seed(0)
import seaborn.apionly as sns
import matplotlib.pyplot as plt
def ReadingAndColumnSelector (DataIn, sepFile):
Data = pd.read_csv (DataIn, sep =sepFile)
return Data
File = ReadingAndColumnSelector (r"C:\Users\Bea\Desktop\lab\Programmi\Vmh4SF21beh (7).csv", ';')
'''
Function that selects a chosen number of elements before and after a chosen action
'''
def SelectionElement (num, action, serieBeh, serieMeans):
listaIndex = []
for i in range (len(serieBeh)):
if serieBeh[i] == action:
if serieBeh [i-1] != action:
if num <= i-1:
for k in range (-num,0):
listaIndex.append(i+k)
for l in range (1,num+1):
listaIndex.append(i+l)
break
else: print ('You have to choose a number smaller than ' + str(i) + '!')
else: continue
serieMeans = serieMeans.ix[listaIndex]
serieBeh = serieBeh.ix[listaIndex]
serieMeans.index = range(-num,num)
return serieMeans
Beh = File.loc [:, 'Beh']
Means = File.loc [:,'Mean(1)': 'Mean(38)']
MeansS = SelectionElement(20,'defense action', Beh, Means)
'''
Function that creates two dataframes that contain the activity before and the activity after the chosen beh
'''
def DataFrameAandB (serieM):
serieMT = serieM.transpose()
serieB = pd.DataFrame ()
serieA = pd.DataFrame ()
for i, row in serieMT.iterrows():
serieB = serieB.append( serieMT.loc [i,-len(serieMT.transpose())/2: -1])
serieA = serieA.append (serieMT.loc [i, 0: len(serieMT.transpose())]/2)
return (serieB, serieA)
meanTupla = DataFrameAandB (MeansS)
meanBefore = meanTupla[0]
meanAfter = meanTupla[1]
'''
Function for normalization
'''
def Normalization (dfA, dfB):
for index, row in dfB.iterrows():
m = dfB.loc[index,:].mean()
dfA.loc[index, :] = dfA.loc[index, :]/m
dfB.loc[index, :] = dfB.loc[index, :]/m
return (dfA, dfB)
DataA, DataB= Normalization (meanAfter, meanBefore)
Data = DataB.join(DataA)
'''
Kmeans and HM
'''
km = KMeans(n_clusters=3, init='k-means++', n_init=20)
km.fit(Data)
x = km.fit_predict(Data)
Data['Cluster'] = x
Data = Data.sort_values(by=['Cluster'])
df2 = Data.drop ('Cluster',1)
fig, ax = plt.subplots(figsize=(20,10))
ax.vlines([20],0,1, transform=ax.get_xaxis_transform(), colors='k')
ax.hlines([22],1,0, transform=ax.get_yaxis_transform(), colors='k')
df2 = df2.drop ('Mean(34)',0)
df2 = df2.drop ('Mean(20)',0)
df2 = df2.drop ('Mean(11)',0)
df2 = df2.drop ('Mean(2)', 0)
df2 = Data.drop ('Mean(10)',0)
df2 = df2.drop ('Mean(29)',0)
df2 = Data.drop ('Mean(32)',0)
df2 = df2.drop ('Mean(36)',0)
Data = df2.drop ('Mean(17)',0)
sns.clustermap(df2,annot = False, xticklabels=1, vmin = -5, vmax = 5, cmap="BuPu",col_cluster=False)
| null |
clsuter_MAP.py
|
clsuter_MAP.py
|
py
| 3,103 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "seaborn.apionly.clustermap",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "seaborn.apionly",
"line_number": 91,
"usage_type": "name"
}
] |
147134071
|
# MIT License
#
# Copyright (c) 2019 Johan Brichau
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
def translate(s, map):
import io
sb = io.StringIO()
for c in s:
v = ord(c)
if v in map:
v = map[v]
if isinstance(v, int):
sb.write(chr(v))
elif v is not None:
sb.write(v)
else:
sb.write(c)
return sb.getvalue()
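# Illustrative example (not part of the original module):
# translate("abc", {ord('a'): ord('z'), ord('b'): None}) returns "zc"
# ('a' is remapped, 'b' is deleted, 'c' passes through unchanged).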
| null |
src/uPythonDevice/third_party/string.py
|
string.py
|
py
| 1,873 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "io.StringIO",
"line_number": 37,
"usage_type": "call"
}
] |
54681929
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ProjectShow', '0012_auto_20160806_0857'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['status', 'project_name'], 'verbose_name': '\u9879\u76ee', 'verbose_name_plural': '\u9879\u76ee'},
),
migrations.AlterModelOptions(
name='task',
options={'ordering': ['is_finished', 'project'], 'verbose_name': '\u4efb\u52a1', 'verbose_name_plural': '\u4efb\u52a1'},
),
]
| null |
ProjectShow/migrations/0013_auto_20160806_0902.py
|
0013_auto_20160806_0902.py
|
py
| 670 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 18,
"usage_type": "name"
}
] |
588894253
|
import sys
import pickle
import logging
import datetime
from os import path
import pandas as pd
import numpy as np
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# Log timestamp, level and message so progress can be followed while running
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
# This will only use the Label Encoder because, when using one-hot encoding,
# we would have to guarantee that every value of a one-hot column is also present
# in the test dataset; otherwise it results in a feature-mismatch bug.
# TODO To resolve this we need to put the columns in the same order and initialize the old columns.
# We will do this later; here we train a simple XGB model for the seven days of data.
# Chunk size (rows read per batch)
CHUNKSIZE = 100000
# Default value; recomputed later as 75% of the total chunk count
TRAIN_ITERATION = 30
CONSTANT_FILLER = 'missing'
NUMERIC_FILLER = 0
ALL_CONSUMER = 'allCustomer'
def mergeDataframe(df1, df2, column, joinType='inner'):
if column is None:
raise RuntimeError("Column can't be null. Please give the column value")
return pd.merge(df1, df2, on=column, how=joinType);
def loadAndMerge(files):
# placement_metrics_file {0}
# placement_metadata_file {1}
# content_metadata_file {2}
# placement_properties_file {3}
# creative_metadata_file {4}
df1 = pd.read_csv(files[0], skiprows=0, header=0)
df2 = pd.read_csv(files[1], skiprows=0, header=0)
df3 = pd.read_csv(files[2], skiprows=0, header=0)
df4 = pd.read_csv(files[3], skiprows=0, header=0)
df5 = pd.read_csv(files[4], skiprows=0, header=0)
logging.info('File Loaded');
df_merged_set = pd.merge(df1, df4, on='frozen_placement_id', how='inner')
df_merged_set = pd.merge(df_merged_set, df2, on='frozen_placement_id', how='inner')
df_merged_set = pd.merge(df_merged_set, df3, on='frozen_content_id', how='inner')
df_merged_set = pd.merge(df_merged_set, df5, on='creative_id', how='inner')
logging.info('File merged');
return df_merged_set
def label_column(df, column):
# TODO Make it general
unique_container_id_list = df.container_id.unique().tolist()
unique_container_id_hash = {}
position = 1
for val in unique_container_id_list:
unique_container_id_hash[val] = position
position = position + 1
df['container_id_label'] = df.apply (lambda row: unique_container_id_hash[row[column]], axis=1)
logging.info('Label done for '+ column)
return df
def generateCleanFile(files, training_file_name):
if path.exists(training_file_name):
logging.info("Training file is already present")
return
df_merged_set = loadAndMerge(files)
logging.info("Loading and dataset merged. Display head-- ")
logging.info(df_merged_set.head())
# Clean few Columns
# Targetting Columns
df_merged_set[['customer_targeting']] = df_merged_set[['customer_targeting']].fillna(value=ALL_CONSUMER)
df_merged_set[['guarantee_percentage']] = df_merged_set[['guarantee_percentage']].fillna(value=NUMERIC_FILLER)
df_merged_set[['component_display_name']] = df_merged_set[['component_display_name']].fillna(value=CONSTANT_FILLER)
logging.info('Targetting Columns Cleaned');
# Creative Columns
df_merged_set[['objective']] = df_merged_set[['objective']].fillna(value=CONSTANT_FILLER)
df_merged_set[['intent']] = df_merged_set[['intent']].fillna(value=CONSTANT_FILLER)
logging.info('Creative Columns Cleaned');
# Generate the unique set and map values
df_merged_set = label_column(df_merged_set, 'container_id')
logging.info("Dataframe Shape "+str(df_merged_set.shape))
df_merged_set.to_csv(training_file_name, index=False, encoding='utf-8')
logging.info('File Created')
def label_result(row, impression_count):
if(row['impressions']>impression_count):
return 1
return 0
def removeNaN(df, categoricalCols, defValue):
# Replace any NaN values
for col in categoricalCols:
df[[col]] = df[[col]].fillna(value=defValue)
return df
# Build the One hot encoder using all data
def buildOneHotEncoder(training_file_name, categoricalCols):
# using a global keyword
global TRAIN_ITERATION
one_hot_encoder = OneHotEncoder(sparse=False)
df = pd.read_csv(training_file_name, skiprows=0, header=0)
TOTAL_CHUNK_COUNT = df.shape[0]/CHUNKSIZE
TRAIN_ITERATION = int((75*TOTAL_CHUNK_COUNT)/100)
logging.info("ChunkSize: Iterations ::" +str(TOTAL_CHUNK_COUNT)+ " : " +str(TRAIN_ITERATION))
df = df[categoricalCols]
df = removeNaN(df, categoricalCols, CONSTANT_FILLER)
logging.info(str(df.columns))
one_hot_encoder.fit(df)
return one_hot_encoder
def trainModel(learning_rate, max_depth, training_file_name, model_filename, impression_count=10):
learning_params = {
'objective' : 'binary:logistic',
'colsample_bytree' : 0.3,
'learning_rate' : learning_rate,
'max_depth' : max_depth,
'alpha' : 5,
'n_estimators' : 200
}
# Init a base Model
xg_reg = {}
YColumns = ['result']
numericalCols = ['impressions', 'guarantee_percentage', 'container_id_label']
categoricalCols = [ 'component_name', 'slot_names', 'container_type', 'component_namespace',
'component_display_name', 'customer_targeting', 'site']
startOneHotIndex = len(numericalCols)
columns_to_keep = YColumns + numericalCols + categoricalCols
one_hot_encoder = buildOneHotEncoder(training_file_name, categoricalCols)
logging.info('One hot encoder')
# Convert to int since it is used in comparisons below
impression_count = int(impression_count)
# If a model file is already present, load it and go straight to prediction
if model_filename is not None and path.exists(model_filename):
logging.info("Model file present. Skipping to predication_package::")
xg_reg = pickle.load(open(model_filename, 'rb'))
predict(training_file_name, one_hot_encoder, xg_reg, impression_count)
return
chunkcount = 1
logging.info("Training for placements impressions < "+str(impression_count))
logging.info("Training for total chunks : "+str(TRAIN_ITERATION))
for chunk in pd.read_csv(training_file_name, chunksize=CHUNKSIZE):
# Train on part of the dataset and predict on the rest
if(chunkcount>TRAIN_ITERATION):
break
logging.info('Starting Training - '+str(chunkcount))
chunk['result'] = chunk.apply (lambda row: label_result(row, impression_count), axis=1)
# Get only the columns to evaluate
chunk = chunk[columns_to_keep + ['weblab']]
# Fill All Categorical Missing Values
chunk = removeNaN(chunk, YColumns + numericalCols, NUMERIC_FILLER)
chunk = removeNaN(chunk, categoricalCols, CONSTANT_FILLER)
# Get all rows where weblab is missing
df_merged_set_test = chunk.where(chunk['weblab']=="missing").dropna()
df_merged_set_test = df_merged_set_test[columns_to_keep]
logging.info('Weblab Removed: Shape - '+str(df_merged_set_test.shape))
INPUT = df_merged_set_test[numericalCols]
# guarantee_percentage NaNs were filled with the constant filler, so change them back to the numeric filler
INPUT.replace(CONSTANT_FILLER, NUMERIC_FILLER, inplace=True)
ONEHOT = df_merged_set_test[categoricalCols]
OUTPUT = df_merged_set_test[YColumns]
logging.info(str(INPUT.columns))
logging.info(str(ONEHOT.columns))
one_hot_encoded = one_hot_encoder.transform(ONEHOT)
logging.info('One hot encoding done')
dataMatrix = xgb.DMatrix(np.column_stack((INPUT.iloc[:,1:], one_hot_encoded)), label=OUTPUT)
if(chunkcount==1):
xg_reg = xgb.train(learning_params, dataMatrix, 200)
else:
# Takes in the initial model and continues training it to produce a better one
xg_reg = xgb.train(learning_params, dataMatrix, 200, xgb_model=xg_reg)
chunkcount = chunkcount + 1
logging.info("Model saved "+str(xg_reg))
saveModel(xg_reg, learning_rate, max_depth, columns_to_keep, impression_count)
predict(training_file_name, one_hot_encoder, xg_reg, impression_count)
return
def saveModel(xg_reg, learning_rate_val, max_depth_val, columns_to_keep, impression_count):
model_filename = '/data/s3_file/models/XGB_MODEL_impression-{}_learning-{}_max_depth-{}_timestamp{}.sav'
timestamp_value = int(datetime.datetime.now().timestamp())
model_filename = model_filename.format(impression_count, learning_rate_val, max_depth_val, timestamp_value)
pickle.dump(xg_reg, open(model_filename, 'wb'))
column_filename = '/data/s3_file/models/XGB_MODEL_COLUMN_{}.sav'
column_filename = column_filename.format(timestamp_value)
pickle.dump(columns_to_keep, open(column_filename, 'wb'))
logging.info("Model and columns are saved")
def predict(training_file_name, one_hot_encoder, xg_reg, impression_count):
YColumns = ['result']
numericalCols = ['impressions', 'guarantee_percentage', 'container_id_label']
categoricalCols = [ 'component_name', 'slot_names', 'container_type', 'component_namespace',
'component_display_name', 'customer_targeting', 'site']
startOneHotIndex = len(numericalCols)
columns_to_keep = YColumns + numericalCols + categoricalCols
chunkcount = 1
for chunk in pd.read_csv(training_file_name, chunksize=CHUNKSIZE):
if(chunkcount<=TRAIN_ITERATION):
chunkcount = chunkcount + 1
continue
chunk['result'] = chunk.apply (lambda row: label_result(row, impression_count), axis=1)
# Get only the columns to evaluate
chunk = chunk[columns_to_keep + ['weblab']]
# Fill all Missing Values so dropna doesn't remove any row
chunk = removeNaN(chunk, numericalCols, NUMERIC_FILLER)
chunk = removeNaN(chunk, categoricalCols, CONSTANT_FILLER)
# Get all rows where weblab is missing
df_merged_set_test = chunk.where(chunk['weblab']=="missing").dropna()
df_merged_set_test = df_merged_set_test[columns_to_keep]
logging.info("Count to predict " + str(df_merged_set_test.shape))
df_merged_set_test = df_merged_set_test[columns_to_keep]
INPUT, OUTPUT = df_merged_set_test.iloc[:,1:], df_merged_set_test.iloc[:,0]
INPUT.iloc[:,1].replace(CONSTANT_FILLER, NUMERIC_FILLER, inplace=True)
logging.info(str(INPUT.columns))
one_hot_encoded = one_hot_encoder.transform(INPUT.iloc[:,startOneHotIndex:])
dataMatrix = xgb.DMatrix(np.column_stack((INPUT.iloc[:,1:startOneHotIndex], one_hot_encoded)))
predictions = xg_reg.predict(dataMatrix)
chunkcount = chunkcount + 1
# Result Analysis for Chunk
matrix = confusion_matrix(OUTPUT, np.around(predictions))
logging.info('Confusion Matrix : ' + str(matrix))
logging.info('Accuracy Score : ' + str(accuracy_score(OUTPUT, np.around(predictions))))
logging.info('Report : ')
logging.info(str(classification_report(OUTPUT, np.around(predictions))))
return
def startSteps(learning_rate, max_depth, impression_count, model_filename):
files = [
'/data/s3_file/FE/18January03FebPMetrics000',
'/data/s3_file/FE/18January03FebPMetadata000',
'/data/s3_file/FE/18January03FebCM000',
'/data/s3_file/FE/18January03FebPP000',
'/data/s3_file/FE/18January03FebCreative000'
]
training_file_name = '/data/s3_file/FE/18January03FebTrainingFile'
generateCleanFile(files, training_file_name)
trainModel(learning_rate, max_depth, training_file_name, model_filename, impression_count)
def __main__():
# count the arguments
if len(sys.argv) < 4:
raise RuntimeError("Please provide the learning_rate, max_depth and impressions count filter")
logging.info(sys.argv)
model_filename = None
if(len(sys.argv)>4):
model_filename = sys.argv[4]
startSteps(sys.argv[1], sys.argv[2], sys.argv[3], model_filename)
#This is required to call the main function
if __name__ == "__main__":
__main__()
| null |
XGBModelV2/XGB_11Feb2020.py
|
XGB_11Feb2020.py
|
py
| 11,690 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pandas.merge",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "xgboost.DMatrix",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.column_stack",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "xgboost.train",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "xgboost.train",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "xgboost.DMatrix",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.column_stack",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 307,
"usage_type": "attribute"
}
] |
50254878
|
# https://github.com/tqdm/tqdm#redirecting-writing
import sys
import logging
from time import sleep
import contextlib
from tqdm.auto import tqdm
class LoggingStreamHandler(logging.StreamHandler):
"""
tqdm-aware substitute for logging's standard stream handler.
The default handler writes to stderr, which conflicts with tqdm; this
handler wraps that call with tqdm's `write()` to avoid conflicts.
Source: https://github.com/tqdm/tqdm/issues/193#issuecomment-233212170
"""
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
msg = self.format(record)
tqdm.write(msg)
class DummyTqdmFile(object):
"""Dummy file-like object that will write to tqdm"""
file = None
def __init__(self, file):
self.file = file
def write(self, x):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
tqdm.write(x, file=self.file)
def flush(self):
return getattr(self.file, "flush", lambda: None)()
@contextlib.contextmanager
def std_out_err_redirect_tqdm():
orig_out_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
yield orig_out_err[0]
# Relay exceptions
except Exception as exc:
raise exc
# Always restore sys.stdout/err if necessary
finally:
sys.stdout, sys.stderr = orig_out_err
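# Illustrative usage, following the tqdm recipe linked above (not part of the original module):
# with std_out_err_redirect_tqdm() as orig_stdout:
#     for _ in tqdm(range(3), file=orig_stdout, dynamic_ncols=True):
#         sleep(0.5)
#         print("printed lines go through tqdm.write() and do not break the progress bar")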
| null |
mackelab_toolbox/tqdm.py
|
tqdm.py
|
py
| 1,429 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.StreamHandler",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tqdm.auto.tqdm.write",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tqdm.auto.tqdm.write",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 38,
"usage_type": "attribute"
}
] |
54227142
|
import random
import copy
import os
import sys
import csv
import time
import keras
import pickle
import tensorflow as tf
from collections import deque
from keras.utils import CustomObjectScope
import numpy as np
import skimage
import gym
class NoisyLayer(keras.layers.Layer):
def __init__(self, in_shape=(1,2592), out_dim=256, activation='tf.identity', name='Layer', **kwargs):
# Parameter assignments
self.in_shape = in_shape
self.out_units = out_dim
self.activation = eval(activation)
self.activation_str = activation
self.name = name
# Derived assignments
self.p = float(self.in_shape[1])
self.mu_interval_value = 1.0/np.sqrt(self.p)
self.sig_0 = 0.5
self.sig_init_constant = self.sig_0/np.sqrt(self.p)
# Naming weights/biases
self.w_mu_name = self.name+'w_mu'
self.w_si_name = self.name+'w_si'
self.b_mu_name = self.name+'b_mu'
self.b_si_name = self.name+'b_si'
super(NoisyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Initializers
self.mu_initializer = tf.initializers.random_uniform(minval=-self.mu_interval_value, maxval=self.mu_interval_value) # Mu-initializer
self.si_initializer = tf.initializers.constant(self.sig_init_constant) # Sigma-initializer
# Weights
# 'Normal' weights -- (get access by: [model.noisy_layer].get_weights()[0] - tested -- correct!)
self.w_mu = self.add_weight( name=self.w_mu_name,
shape=(self.in_shape[1], self.out_units),
initializer=self.mu_initializer,
trainable=True)
# 'Noisy' weights -- (get access by: .get_weights()[1] - tested -- correct!)
self.w_si = self.add_weight( name=self.w_si_name,
shape=(self.in_shape[1], self.out_units),
initializer=self.si_initializer,
trainable=True)
# Biases
# 'Normal' biases -- (get access by: .get_weights()[2][0] - tested -- correct!)
self.b_mu = self.add_weight( name=self.b_mu_name,
shape=(self.in_shape[0], self.out_units),
initializer=self.mu_initializer,
trainable=True)
# 'Noisy' biases -- (get access by: .get_weights()[3][0] - tested -- correct!)
self.b_si = self.add_weight( name=self.b_si_name,
shape=(self.in_shape[0], self.out_units),
initializer=self.si_initializer,
trainable=True)
# Make sure this function is going to be called on init
super(NoisyLayer, self).build(input_shape)
def call(self, inputs):
# Resample noise - once per input-batch
self.assign_resampling()
# Putting it all together
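# Noisy parameters follow the NoisyNet form: w = w_mu + w_si * eps_w and b = b_mu + b_si * eps_b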
self.w = tf.math.add(self.w_mu, tf.math.multiply(self.w_si, self.w_eps))
self.b = tf.math.add(self.b_mu, tf.math.multiply(self.b_si, self.q_eps))
return self.activation(tf.math.add(tf.linalg.matmul(inputs, self.w), self.b))
# Functionality supporting deepcopying, saving, and loading
def compute_output_shape(self, input_shape):
return (input_shape[0], self.out_units)
def get_config(self):
config = super(NoisyLayer, self).get_config()
config['in_shape'] = self.in_shape
config['out_dim'] = self.out_units
config['activation'] = self.activation_str
config['name'] = self.name
return config
# Noise sampling - Factorised Gaussian noise
def assign_resampling(self):
# p = related to (i) inputs; q = related to (j) outputs
self.p_eps = self.f(self.resample_noise([self.in_shape[1], 1])) # = f(eps_i) in paper
self.q_eps = self.f(self.resample_noise([1, self.out_units])) # = eps_b = f(eps_j) in paper; Eqn. 11
self.w_eps = self.p_eps * self.q_eps # Cartesian product of input_noise x output_noise; Eqn. 10
def resample_noise(self, shape):
return tf.random.normal(shape, mean=0.0, stddev=1.0, seed=None, name=None)
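# Scaling function f(x) = sign(x) * sqrt(|x|), as used for factorised Gaussian noise in NoisyNet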
def f(self, x):
return tf.math.multiply(tf.math.sign(x), tf.math.sqrt(tf.math.abs(x)))
# Get custom weights (the non-noisy ones for evaluation games)
def get_non_noisy_weights(self):
return [self.get_weights()[0], self.get_weights()[2][0]] # return weights_mu (=self.w_mu) and bias_mu (=self.b_mu) (;the weights + biases not related to randomness/noise)
class Network:
def __init__(self, nettype, actionspace_size, learning_rate, gradient_momentum, gradient_min, noisy_flag):
frames_input = keras.layers.Input((84, 84, 4))
actions_input = keras.layers.Input((actionspace_size,))
self.conv1_layer = keras.layers.Conv2D(16, (8, 8), strides=(4, 4), activation="relu")
self.conv2_layer = keras.layers.Conv2D(32, (4, 4), strides=(2, 2), activation="relu")
conv1 = self.conv1_layer(frames_input)
conv2 = self.conv2_layer(conv1)
flattened = keras.layers.Flatten()(conv2)
if noisy_flag:
self.hidden_layer = NoisyLayer(in_shape=(1,2592), out_dim=256, activation="tf.nn.relu", name=str('Noisy1_' + nettype))
hidden = self.hidden_layer(flattened)
if nettype == 'q':
self.output_layer = NoisyLayer(in_shape=(1,256), out_dim=actionspace_size, activation="tf.identity", name=str('Noisy2_' + nettype))
output = self.output_layer(hidden)
filtered_output = keras.layers.merge.Multiply()([output, actions_input])
self.model = keras.models.Model(inputs=[frames_input, actions_input], outputs=filtered_output)
if nettype == 'v':
self.output_layer = NoisyLayer(in_shape=(1,256), out_dim=1, activation="tf.identity", name=str('Noisy2_' + nettype))
output = self.output_layer(hidden)
self.model = keras.models.Model(inputs=frames_input, outputs=output)
else:
self.hidden_layer = keras.layers.Dense(256, activation="relu")
hidden = self.hidden_layer(flattened)
if nettype == 'q':
self.output_layer = keras.layers.Dense(actionspace_size)
output = self.output_layer(hidden)
filtered_output = keras.layers.merge.Multiply()([output, actions_input])
self.model = keras.models.Model(inputs=[frames_input, actions_input], outputs=filtered_output)
if nettype == 'v':
self.output_layer = keras.layers.Dense(1)
output = self.output_layer(hidden)
self.model = keras.models.Model(inputs=frames_input, outputs=output)
self.model.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=learning_rate, rho=gradient_momentum, epsilon=gradient_min))
self.noisy_flag = noisy_flag
self.nettype = nettype
def get_custom_model_weights(self): # Get custom set of weights which includes only non-noisy ones for NoisyLayer
weights = []
weights.append(self.conv1_layer.get_weights()) #self.x_layer.get_weights()[0] == Weights && self.x_layer.get_weights()[1] == Biases
weights.append(self.conv2_layer.get_weights())
weights.append(self.hidden_layer.get_weights() if not self.noisy_flag else self.hidden_layer.get_non_noisy_weights())
weights.append(self.output_layer.get_weights() if not self.noisy_flag else self.output_layer.get_non_noisy_weights())
return weights
def set_non_noisy_weights(self, weights):
#if self.noisy_flag or self.nettype is 'v':
# print('Error. Not supposed to call set_non_noisy_weights()!')
# return
self.conv1_layer.set_weights(weights[0])
self.conv2_layer.set_weights(weights[1])
self.hidden_layer.set_weights(weights[2])
self.output_layer.set_weights(weights[3])
class Agent:
def __init__(self, q_net_network, q_net, v_net_network, v_net, target_net, memory, batch_size, discount_factor, actionspace_size, epsilon, epsilon_decay, epsilon_min, noisy_flag):
self.v_net_network = v_net_network
self.q_net_network = q_net_network # Containing functions defined in network class; To get access to non-noisy weights in case of NoisyNet
self.eval_network = None # Containing functions defined in network class; To be able to set current q_net_network weights
self.q_net = q_net # Contains only model-architecture
self.v_net = v_net
self.target_net = target_net
self.memory = memory
self.batch_size = batch_size
self.discount_factor = discount_factor
self.actionspace_size = actionspace_size
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.epsilon_min = epsilon_min
self.weight_updates = 0
self.noisy_flag = noisy_flag
def train(self):
mini_batch = random.sample(self.memory, self.batch_size)
prev_states = np.empty((self.batch_size, 84, 84, 4), dtype=np.float32)
next_states = np.empty((self.batch_size, 84, 84, 4), dtype=np.float32)
actions = np.empty(self.batch_size, dtype=np.int32)
rewards = np.empty(self.batch_size, dtype=np.float32)
terminals = np.empty(self.batch_size, dtype=np.bool)
for i in range(self.batch_size):
prev_states[i] = np.float32(mini_batch[i][0] / 255.0)
next_states[i] = np.float32(mini_batch[i][3] / 255.0)
actions[i] = mini_batch[i][1]
rewards[i] = mini_batch[i][2]
terminals[i] = mini_batch[i][4]
q_values = np.empty(self.batch_size)
v_values = np.zeros((self.batch_size,))
v_targets = self.target_net.predict(next_states)
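# DQV-style targets: both the Q-network and the V-network regress towards r + gamma * V_target(s'), or just r for terminal transitions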
for i in range(self.batch_size):
if terminals[i]:
q_values[i] = rewards[i]
v_values[i] = rewards[i]
else:
q_values[i] = rewards[i] + self.discount_factor * v_targets[i]
v_values[i] = rewards[i] + self.discount_factor * v_targets[i]
one_hot_actions = np.eye(self.actionspace_size)[np.array(actions).reshape(-1)]
self.q_net.fit([prev_states, one_hot_actions], one_hot_actions * q_values[:, None], batch_size=self.batch_size, epochs=1, verbose=0)
self.v_net.fit(prev_states, v_values, batch_size=self.batch_size, epochs=1, verbose=0)
self.weight_updates += 1
self.updateEpsilon()
def play(self, environment, evaluation_games, output_path):
average_reward = 0.0
self.eval_network.set_non_noisy_weights(self.q_net_network.get_custom_model_weights())
for _ in range(evaluation_games):
environment.reset()
observation, reward, done, _ = environment.step(1)
frame = getPreprocessedFrame(observation)
state = np.stack((frame, frame, frame, frame), axis=2)
state = np.reshape([state], (1, 84, 84, 4))
accumulated_epoch_reward = 0
while not done:
action = self.chooseAction(state, True)
observation, reward, done, _ = environment.step(action)
accumulated_epoch_reward += reward
frame = getPreprocessedFrame(observation)
frame = np.reshape([frame], (1, 84, 84, 1))
state = np.append(frame, state[:, :, :, :3], axis=3)
average_reward += accumulated_epoch_reward / evaluation_games
writeLog(output_path + 'eval.csv', [self.weight_updates, average_reward])
def chooseAction(self, state, evaluation=False):
if self.noisy_flag and evaluation: # Evaluate non-noisily in case of Noisy-Net
model = self.eval_network.model
else:
model = self.q_net
state = np.float32(state / 255.0)
actions_mask = np.ones(self.actionspace_size).reshape(1, self.actionspace_size)
q_values = model.predict([state, actions_mask])
action = np.argmax(q_values)
return action
def useEpsilonGreedy(self, state):
if not self.noisy_flag and random.random() < self.epsilon:
action = random.randrange(self.actionspace_size)
else:
action = self.chooseAction(state)
return action
def updateEpsilon(self):
self.epsilon = max(self.epsilon_min, self.epsilon - self.epsilon_decay)
def storeExperience(self, state, action, reward, next_state, terminal):
self.memory.append((state, action, reward, next_state, terminal))
def updateTargetNet(self):
self.target_net.set_weights(self.v_net.get_weights())
def getPreprocessedFrame(observation):
observation = skimage.color.rgb2gray(observation)
observation = skimage.transform.resize(observation, (84, 84))
observation = np.uint8(observation * 255)
return observation
def writeLog(path, content):
with open(path, 'a') as log:
csv_writer = csv.writer(log, delimiter=';')
csv_writer.writerow(content)
def saveModel(path, model):
with CustomObjectScope({"NoisyLayer":NoisyLayer}):
model.save(path)
def loadModel(path):
with CustomObjectScope({"NoisyLayer":NoisyLayer}):
return keras.models.load_model(path)
def saveAgent(path, agent):
with open(path, 'wb') as saved_object:
pickle.dump(agent, saved_object, pickle.HIGHEST_PROTOCOL)
def loadAgent(path):
with open(path, 'rb') as saved_object:
return pickle.load(saved_object)
def main():
if len(sys.argv) != 3:
print("Please provide a valid environment and session ID")
return
environment_id = sys.argv[1]
session_id = sys.argv[2]
path = './Data/NoisyNet_DQV/' + environment_id + '/' + session_id + '/'
if not os.path.exists(path):
os.makedirs(path)
print("This is NoisyNet-DQV " + environment_id + " " + session_id + "\nSession will be stored at " + path)
environment = gym.make(environment_id)
eval_environment = gym.make(environment_id)
training_start = 50000
update_target_step = 10000
evaluation_step = 10000
evaluation_games = 20
training_stop = 10000000
actionspace_size = environment.action_space.n
batch_size = 32
discount_factor = 0.99
learning_rate = 0.00025
gradient_momentum = 0.95
gradient_min = 0.01
epsilon = 1
epsilon_decay = 1e-06
epsilon_min = 0.1
noisy_flag = True
# Load previous session or create new one
if os.path.isfile(path + 'agent.pkl'):
print("agent found; loading previous agent")
agent = loadAgent(path + 'agent.pkl')
noisy_flag = agent.noisy_flag
step_number = agent.weight_updates + training_start
# Q-net
q_net = loadModel(path + 'qmodel.h5')
q_net_network = Network('q', actionspace_size, learning_rate, gradient_momentum, gradient_min, noisy_flag)
q_net_network.model.set_weights(q_net.get_weights())
agent.q_net_network = q_net_network
agent.q_net = agent.q_net_network.model
# V-net
v_net = loadModel(path + 'vmodel.h5')
v_net_network = Network('v', actionspace_size, learning_rate, gradient_momentum, gradient_min, noisy_flag)
v_net_network.model.set_weights(v_net.get_weights())
agent.v_net_network = v_net_network
agent.v_net = agent.v_net_network.model
# Target net
agent.target_net = loadModel(path + 'targetmodel.h5')
else:
if os.path.isfile(path + 'log.csv'):
print("incomplete session found; aborting")
return
print("no agent found; creating new agent")
q_net_network = Network('q', actionspace_size, learning_rate, gradient_momentum, gradient_min, noisy_flag)
q_net = q_net_network.model
v_net_network = Network('v', actionspace_size, learning_rate, gradient_momentum, gradient_min, noisy_flag)
v_net = v_net_network.model
with CustomObjectScope({"NoisyLayer":NoisyLayer}):
target_net = copy.deepcopy(v_net)
memory = deque(maxlen=1000000)
agent = Agent(q_net_network, q_net, v_net_network, v_net, target_net, memory, batch_size, discount_factor, actionspace_size, epsilon, epsilon_decay, epsilon_min, noisy_flag)
step_number = 0
# Evaluation-network in case of NoisyNet training; Standard DQN network to get non-noisy evaluation
agent.eval_network = Network('q', actionspace_size, learning_rate, gradient_momentum, gradient_min, False)
    end_time = time.time() + 863600  # wall-clock budget of roughly 10 days (exactly 10 days would be 864000 s)
print("starting")
# Main loop
while agent.weight_updates < training_stop and time.time() < end_time:
environment.reset()
for _ in range(random.randint(1, 25)):
observation, reward, done, info = environment.step(1)
# Init state based on first frame
frame = getPreprocessedFrame(observation)
state = np.stack((frame, frame, frame, frame), axis=2)
state = np.reshape([state], (1, 84, 84, 4))
accumulated_epoch_reward = 0
while not done and agent.weight_updates < training_stop and time.time() < end_time:
step_number += 1
terminal = False
lives = info['ale.lives']
# Choose and perform action and check if life lost
action = agent.useEpsilonGreedy(state)
observation, reward, done, info = environment.step(action)
accumulated_epoch_reward += reward
reward = np.clip(reward, -1., 1.)
if lives > info['ale.lives'] or done:
terminal = True
# Update state based on new frame
prev_state = state
frame = getPreprocessedFrame(observation)
frame = np.reshape([frame], (1, 84, 84, 1))
state = np.append(frame, state[:, :, :, :3], axis=3)
# Store state in memory
agent.storeExperience(prev_state, action, reward, state, terminal)
# Train agent
if step_number > training_start:
agent.train()
# Potentially update target net
if step_number % update_target_step == 0:
agent.updateTargetNet()
# Evaluation games
if step_number % evaluation_step == 0:
agent.play(eval_environment, evaluation_games, path)
# Produce output
writeLog(path + 'log.csv', [agent.weight_updates, accumulated_epoch_reward, agent.epsilon])
# Save models and agent
#print('About to save models.')
saveModel(path + 'qmodel.h5', agent.q_net)
#saveModel(path + 'vmodel.h5', agent.v_net)
#saveModel(path + 'targetmodel.h5', agent.target_net)
#print('Saved models. Bye!')
#agent.q_net = agent.v_net = agent.target_net = agent.q_net_network = agent.v_net_network = agent.eval_network = None
#saveAgent(path + 'agent.pkl', agent)
if __name__ == "__main__":
main()
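# Example invocation (the environment and session IDs are illustrative, not taken from this file):
#   python Noisy-DQV.py BreakoutDeterministic-v4 run01
# sys.argv[1] is handed to gym.make() and sys.argv[2] names the output directory under ./Data/NoisyNet_DQV/.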
| null |
Noisy-DQV.py
|
Noisy-DQV.py
|
py
| 19,736 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "keras.layers",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.initializers.random_uniform",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tensorflow.initializers",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.initializers.constant",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tensorflow.initializers",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.add",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.multiply",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.add",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.multiply",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.add",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.linalg.matmul",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.linalg",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.random.normal",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.multiply",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.sign",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.sqrt",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.abs",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Input",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.merge.Multiply",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Model",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Model",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Dense",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Dense",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.merge.Multiply",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Model",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "keras.layers.Dense",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Model",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "keras.optimizers.RMSprop",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "keras.optimizers",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "random.sample",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.bool",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "skimage.color.rgb2gray",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "skimage.color",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "skimage.transform.resize",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "skimage.transform",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "keras.utils.CustomObjectScope",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "keras.utils.CustomObjectScope",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 330,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "keras.utils.CustomObjectScope",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 437,
"usage_type": "call"
}
] |
383581569
|
#####################################
# CMSSW configuration file - Python #
#####################################
import FWCore.ParameterSet.Config as cms
import datetime
process = cms.Process("USER")
###########################
# Basic process controls. #
###########################
today = str(datetime.date.today())
fileLabel = ''
# Source
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring('file:reco.root');
process.source = cms.Source ("PoolSource",fileNames = readFiles)
##################
# Basic services #
##################
process.MessageLogger = cms.Service("MessageLogger")
process.TFileService = cms.Service("TFileService",
fileName = cms.string('results_'+fileLabel+today+'.root')
)
#process.Tracer = cms.Service("Tracer")
from RSGraviton.RSAnalyzer.Zhistos_cff import histograms as Zhistos
from RSGraviton.RSAnalyzer.Ghistos_cff import histograms as Ghistos
from RSGraviton.RSAnalyzer.basicjethistos_cff import histograms as jethistos
############################
# Reconstruct the C-A jets #
############################
# Load geometry
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string('IDEAL_V9::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
# CATopJets
from RecoJets.JetProducers.CATopJetParameters_cfi import *
from RecoJets.JetProducers.GenJetParameters_cfi import *
from RecoJets.JetProducers.CaloJetParameters_cfi import *
process.caTopJetsProducer = cms.EDProducer("CATopJetProducer",
CATopJetParameters,
CaloJetParameters
)
# turn off sum-et dependent stuff.
process.caTopJetsProducer.ptBins = cms.vdouble(0,10e9)
process.caTopJetsProducer.rBins = cms.vdouble(0.8,0.8)
process.caTopJetsProducer.ptFracBins = cms.vdouble(0.05,0.05)
process.caTopJetsProducer.nCellBins = cms.vint32(1,1)
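# With a single pt bin spanning (0, 10e9), the rBins, ptFracBins and nCellBins values above are
# effectively constant, i.e. independent of the jet sum-et.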
process.makeCAJets = cms.Sequence(process.caTopJetsProducer)
##################
# Kinematic cuts #
##################
# 1 Primary cut: require at least two jets above the configured etMin (5 GeV below)
process.twoJetsAboveHundred = cms.EDFilter("EtMinBasicJetCountFilter",
src = cms.InputTag("caTopJetsProducer"),
minNumber = cms.uint32(2),
etMin = cms.double(5.0)
)
process.getTwoJetsAboveHundred = cms.EDProducer("LargestEtBasicJetSelector",
src = cms.InputTag("caTopJetsProducer"),
maxNumber = cms.uint32(2)
)
process.makePrelimCuts = cms.Sequence(process.twoJetsAboveHundred + process.getTwoJetsAboveHundred)
# 2 Secondary cut: require at least one jet above 12 GeV and at least two jets above 10 GeV (values below)
process.cutSecondJet = cms.EDFilter("EtMinBasicJetCountFilter",
src = cms.InputTag("caTopJetsProducer"),
minNumber = cms.uint32(2),
etMin = cms.double(10.0)
)
process.cutFirstJet = cms.EDFilter("EtMinBasicJetCountFilter",
src = cms.InputTag("caTopJetsProducer"),
minNumber = cms.uint32(1),
etMin = cms.double(12.0)
)
process.getTwoJetsAfterMainCuts = cms.EDProducer("LargestEtBasicJetSelector",
src = cms.InputTag("caTopJetsProducer"),
maxNumber = cms.uint32(2)
)
process.makeMainCuts = cms.Sequence(process.cutSecondJet + process.cutFirstJet + process.getTwoJetsAfterMainCuts)
#########
# Plots #
#########
# Basic jet plots
process.plotJetsAfterPrelim = cms.EDAnalyzer("CandViewHistoAnalyzer",
src = cms.InputTag("getTwoJetsAboveHundred"),
histograms = jethistos
)
process.plotJetsAfterMainCuts = cms.EDAnalyzer("CandViewHistoAnalyzer",
src = cms.InputTag("getTwoJetsAfterMainCuts"),
histograms = jethistos
)
# Flow producer
process.flowAfterPrelim = cms.EDProducer("RSFlowAnalyzer",
tracks = cms.InputTag("generalTracks"),
jets = cms.InputTag("getTwoJetsAboveHundred"),
maxDeltaR = cms.double(0.7)
)
process.flowAfterMainCuts = cms.EDProducer("RSFlowAnalyzer",
tracks = cms.InputTag("generalTracks"),
jets = cms.InputTag("getTwoJetsAfterMainCuts"),
maxDeltaR = cms.double(0.7)
)
# My analyzer
process.analyzerAfterPrelim = cms.EDAnalyzer("CompoundJetAnalyzer",
src = cms.InputTag("getTwoJetsAboveHundred"),
flowSrc = cms.InputTag("flowAfterPrelim")
)
process.analyzerAfterMainCuts = cms.EDAnalyzer("CompoundJetAnalyzer",
src = cms.InputTag("getTwoJetsAfterMainCuts"),
flowSrc = cms.InputTag("flowAfterMainCuts")
)
#########
# Paths #
#########
# Make the jets.
process.p1 = cms.Path(process.makeCAJets + process.makePrelimCuts + process.plotJetsAfterPrelim + process.flowAfterPrelim + process.analyzerAfterPrelim)
process.p2 = cms.Path(process.makeCAJets + process.makeMainCuts + process.plotJetsAfterMainCuts + process.flowAfterMainCuts + process.analyzerAfterMainCuts)
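# This configuration would typically be run with the CMSSW driver, e.g. "cmsRun rs_compoundJet_cfg.py",
# reading 'file:reco.root' and writing the TFileService output named above.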
| null |
RSGraviton/RSAnalyzer/analysis_compoundJetAlgo/rs_compoundJet_cfg.py
|
rs_compoundJet_cfg.py
|
py
| 6,415 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "FWCore.ParameterSet.Config.Process",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "FWCore.ParameterSet.Config.untracked.PSet",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config.untracked",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.untracked.int32",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config.untracked.vstring",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config.untracked",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Source",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Service",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Service",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.string",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.string",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDProducer",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.vdouble",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.vdouble",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.vdouble",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.vint32",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Sequence",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDFilter",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.uint32",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.double",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDProducer",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.uint32",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Sequence",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDFilter",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.uint32",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.double",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDFilter",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.uint32",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.double",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDProducer",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.uint32",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Sequence",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDAnalyzer",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "RSGraviton.RSAnalyzer.basicjethistos_cff.histograms",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDAnalyzer",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "RSGraviton.RSAnalyzer.basicjethistos_cff.histograms",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDProducer",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.double",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDProducer",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.double",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDAnalyzer",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.EDAnalyzer",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.InputTag",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Path",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "FWCore.ParameterSet.Config.Path",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "FWCore.ParameterSet.Config",
"line_number": 147,
"usage_type": "name"
}
] |
270497283
|
import itertools
import os
import re
import sys
import time
import requests
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
another_user = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 920)'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
session = requests.Session()
CURRENT_DIR = os.getcwd()
def sanitize_filename(s, restricted=False, is_id=False):
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
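# For example (unrestricted mode), sanitize_filename('a:b/c?') returns 'a -b_c':
# ':' becomes ' -', '/' becomes '_' and '?' is dropped.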
def get_name_show_cmd(ele, title):
if ele:
return '(%s) %s' % (ele, title)
return '(%s)' % (title)
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
def try_get(src, getter, expected_type=None):
if not isinstance(getter, (list, tuple)):
getter = [getter]
for get in getter:
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
def mimetype2ext(mt):
if mt is None:
return None
ext = {
'audio/mp4': 'm4a',
'audio/mpeg': 'mp3',
}.get(mt)
if ext is not None:
return ext
_, _, res = mt.rpartition('/')
res = res.split(';')[0].strip().lower()
return {
'3gpp': '3gp',
'smptett+xml': 'tt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-sami': 'sami',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
'mp2t': 'ts',
}.get(res, res)
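# For example, mimetype2ext('audio/mpeg') returns 'mp3' and mimetype2ext('application/x-mpegurl')
# returns 'm3u8'.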
class SoundCloudException(Exception):
"""Raise when have a bug"""
| null |
utils.py
|
utils.py
|
py
| 3,876 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "itertools.chain",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 43,
"usage_type": "call"
}
] |
554800548
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 04:23:44 2018
@author: Traoreabraham
"""
import numpy as np
from multiprocessing import Pool
import scipy
import sys
sys.path.append("..")
#sys.path.append("/home/scr/etu/sil821/traorabr/Tensorly/")
import tensorly as tl
tl.set_backend('numpy')
from tensorly.base import unfold
#sys.path.append("/home/scr/etu/sil821/traorabr/OnlineTensorDictionaryLearning/")
#sys.path.append("/Users/Traoreabraham/Desktop/OnlineTensorDictionaryLearning/")
from MiscellaneousFunctions.MethodsTSPen import GenerateTensorsGeneral
from MiscellaneousFunctions.ALTO import ALTO_setWithpredefinedEpochs
import pdb
from tensorly import tenalg
from tensorly.backend import mxnet_backend
#np.seterr(all='ignore')
np.random.seed(1)
#np.random.seed(100): the results were obtained from this seed
def Nonnegativepart(Elements):#The parameters are arrays
result=[]
for element in Elements:
result.append(np.maximum(element,0))
return result
#def Error(X,G,listoffactors,setting):#All the parameters are tensors
# error=0
# if(setting=="Single"):
# error=np.power(T.norm(X-Tensor_matrixproduct(G,listoffactors),2),2)
# return error
# if(setting=="MiniBatch"):
# rho=len(X)
# for r in range(rho):
# error=error+np.power(T.norm(X[r]-Tensor_matrixproduct(G[r],listoffactors),2),2)
# return error
def ErrorSingle(args):
#This function computes the square of the fitting error
error=np.power(tl.norm(args[0][args[3]]-Tensor_matrixproduct(args[1][args[3]],args[2]),2),2)
return error
def ErrorSet(X_set,G,listoffactors,pool):
#This function computes the square of the fitting error for several tensors
    #All the parameters are of tensor type
L=len(X_set)
Result=pool.map(ErrorSingle,[[X_set,G,listoffactors,l] for l in range(L)])
return np.array(Result)
def Error(X,G,listoffactors,setting,pool):
#This function computes the fitting error in batch and online setting
#All the parameters are of tensor type
if(setting=="Single"):
error=np.power(tl.norm(X-Tensor_matrixproduct(G,listoffactors),2),2)
return error
if(setting=="MiniBatch"):
Errorlist=ErrorSet(X,G,listoffactors,pool)
return np.sum(np.array(Errorlist))
def FittingErrorComputation(X_set,G_set,listoffactors):
L=len(X_set)
Fitting_error=[]
for t in range(L):
        Fitting_error.append(Error(tl.tensor(X_set[t]),tl.tensor(G_set[t]),listoffactors,"Single",None))  # pool argument is unused in the "Single" setting
return Fitting_error
def Proximal_operator(X,step):#The parameter is a tensor
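    # Soft-thresholding: np.sign(x) * max(|x| - step, 0) is the proximal operator of step * ||.||_1,
    # which is what lets the sparse-coding updates shrink small core-tensor coefficients to zero.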
Res=np.copy(mxnet_backend.to_numpy(X))
Res=np.sign(Res)*np.maximum(np.abs(Res)-step,0)
return tl.tensor(Res)
def Operations_listmatrices(listofmatrices,operationnature):#The parameters are tensors
Res=[]
if (operationnature=="Turnintoarray"):
for matrix in listofmatrices:
element=np.copy(mxnet_backend.to_numpy(matrix))
Res.append(element)#computes A.T
return Res
if (operationnature=="Transpose"):
for matrix in listofmatrices:
element=np.copy(mxnet_backend.to_numpy(matrix))
Res.append(tl.tensor(element.T))#computes A.T
return Res
if(operationnature=="Transposetimes"):
for matrix in listofmatrices:
element=np.copy(mxnet_backend.to_numpy(matrix))
Res.append(tl.tensor(np.dot(element.T,element))) #computes A.T*A
return Res
if(operationnature=="NormI"):
for matrix in listofmatrices:
Res.append(tl.norm(matrix,1))
return Res
if(operationnature=="NormII"):
for matrix in listofmatrices:
Res.append(np.power(tl.norm(matrix,2),2))
return Res
if(operationnature=="Tensorize"):
for matrix in listofmatrices:
Res.append(tl.tensor(matrix))
return Res
def derivativeCore(X,G,listofmatrices):#The entries are tensors
#Firstterm=T.tensor(np.copy(mxnet_backend.to_numpy(X)))
Firstterm=tl.tensor(X)
Firstterm=Tensor_matrixproduct(Firstterm,Operations_listmatrices(listofmatrices,"Transpose"))
#Secondterm=T.tensor(np.copy(mxnet_backend.to_numpy(G)))
Secondterm=tl.tensor(G)
Secondterm=Tensor_matrixproduct(Secondterm,Operations_listmatrices(listofmatrices,"Transposetimes"))
Res=Firstterm-Secondterm
return Res
def derivativeDict(X,G,A,listofmatrices,alpha,theta,n):#the parameters are tensors
listoffactors=list(listofmatrices)
listoffactors[n]=tl.tensor(np.identity(X.shape[n]))
WidehatX=Tensor_matrixproduct(X,Operations_listmatrices(listoffactors,"Transpose"))
listoffactors[n]=tl.tensor(np.identity(G.shape[n]))
B=unfold(Tensor_matrixproduct(G,listoffactors),n)
Result=tl.tensor(np.dot(mxnet_backend.to_numpy(unfold(WidehatX,n)),mxnet_backend.to_numpy(unfold(G,n)).T))-tl.tensor(np.dot(mxnet_backend.to_numpy(A),np.dot(mxnet_backend.to_numpy(B),mxnet_backend.to_numpy(B).T)))+alpha*(1-theta)*A
#Res1=np.dot(A,np.dot(B,B.T))
#Res2=alpha*(1-theta)*A
return Result
def Factorupdateproblem(X,G,Ainit,listoffactorsmatrices,alpha,theta,n,maxiter,epsilon):
Anew=tl.tensor(Ainit)
Aold=tl.tensor(np.zeros(Anew.shape))
Aresult=tl.tensor(np.zeros(Anew.shape))
error=np.power(tl.norm(X-Tensor_matrixproduct(G,listoffactorsmatrices),2),2)+alpha*(1-theta)*np.power(tl.norm(Anew,2),2)
#previouserror=0
nbiter=0
while(nbiter<maxiter):
nbiter=nbiter+1
Aold=Anew
#previouserror=error
Anew=derivativeDict(X,G,Aold,listoffactorsmatrices,alpha,theta,n)
Anew=Anew/tl.norm(Anew,2)
error=np.power(tl.norm(X-Tensor_matrixproduct(G,listoffactorsmatrices),2),2)#+alpha*(1-theta)*np.power(T.norm(Anew,2),2)
Aresult=Anew
#if(previouserror-error<epsilon):
if(np.sqrt(error)/tl.norm(X,2)<epsilon):
Aresult=Aold
break
return Aresult
def ComputeCosineDistance(A,B):
[N,M]=np.array(A.shape,dtype=int)
res=[]
for n in range(N):
if ((np.linalg.norm(A[n,:])!=0) and (np.linalg.norm(B[n,:])!=0)):
cosinus=scipy.spatial.distance.cosine(A[n,:],B[n,:])
#cosinus=scipy.spatial.distance.correlation(A[:,m],B[:,m])
#print("The value of cosinus is")
#print(cosinus)
res.append(cosinus)
return np.mean(np.array(res))
def ComputeCosineDistanceSet(Listofactorsa,Listoffactorsb):
N=len(Listofactorsa)
res=[]
for n in range(N):
A=Listofactorsa[n]
B=Listoffactorsb[n]
res.append(ComputeCosineDistance(A,B))
return np.array(res)
def CheckSameSubspaces(A,B):#Compare the subspaces generated by the two matrices
Qa=np.linalg.qr(A)[0]
Qb=np.linalg.qr(B)[0]
return Qa,Qb
def CheckSameSubspacesSet(Listofactorsa,Listoffactorsb):
N=len(Listofactorsa)
res=[]
for n in range(N):
Qa=Listofactorsa[n]
Qb=Listoffactorsb[n]
res.append(np.linalg.norm(Qa-Qb))
return np.array(res)
def Tensor_matrixproduct(X,listoffactors):#The parameters are tensors(tensor and matrices)
Res=tl.tensor(np.copy(mxnet_backend.to_numpy(X)))
mode=-1
for matrix in listoffactors:
mode=mode+1
Res=tenalg.mode_dot(Res,matrix,mode)
return Res
def Sparse_code(X,G_init,listoffactors,Nonnegative,step,max_iter,alpha,theta,epsilon):#The parameters are tensors
#This function is used to perform the sparse coding step
    #All the tensors and parameters are of tensor type
#G_new=T.tensor(np.copy(mxnet_backend.to_numpy(G_init)))
G_new=tl.tensor(G_init)
G_old=tl.tensor(np.zeros(G_new.shape))
G_result=tl.tensor(np.zeros(G_new.shape))
Lambda=alpha*theta
error=np.power(tl.norm(X-Tensor_matrixproduct(G_new,listoffactors),2),2)+Lambda*tl.norm(G_new,1)
#previous_error=0
nb_iter=0
error_list=[error]
while(nb_iter<=max_iter):
nb_iter=nb_iter+1
#previous_error=error
G_old=G_new
G_new=G_old-step*derivativeCore(X,G_old,listoffactors)
if(Nonnegative==True):
G_new=tl.tensor(np.maximum(mxnet_backend.to_numpy(G_old-step*(derivativeCore(tl.tensor(X),G_old,listoffactors)))+alpha*theta*np.ones(G_old.shape),0))
if(Nonnegative==False):
G_new=Proximal_operator(G_new,step)
error=np.power(tl.norm(X-Tensor_matrixproduct(G_new,listoffactors),2),2)#+Lambda*T.norm(G_new,1)
G_result=G_new
error_list.append(error)
#if(np.abs(previous_error-error)/error<epsilon):
if(np.sqrt(error)/tl.norm(X,2)<epsilon):
G_result=G_old
error_list=error_list[0:len(error_list)-1]
break
return G_result,error_list,nb_iter
def TuckerBatch(X,Coretensorsize,max_iter,listoffactorsinit,Ginit,Nonnegative,Reprojectornot,alpha,theta,step,epsilon):
N=len(list(X.shape))
listoffactorsnew=list(listoffactorsinit)
listoffactorsnew=Operations_listmatrices(listoffactorsnew,"Tensorize")
listoffactorsold=[]
listoffactorsresult=[]
Gnew=tl.tensor(np.copy(Ginit))
Gold=tl.tensor(np.zeros(Ginit.shape))
Gresult=tl.tensor(np.zeros(Ginit.shape))
error=np.power(tl.norm(tl.tensor(X)-Tensor_matrixproduct(Gnew,listoffactorsnew),2),2)#+alpha*theta*T.norm(Gnew,1)+alpha*(1-theta)*np.sum(Operations_listmatrices(listoffactorsnew[1:N],"NormII"))
nbiter=0
errorlist=[error]
while (nbiter<max_iter):
print("We are in batch")
nbiter=nbiter+1
listoffactorsold=listoffactorsnew
Gold=Gnew
Gnew=Sparse_code(tl.tensor(X),Gold,listoffactorsold,Nonnegative,step,max_iter,alpha,theta,epsilon)[0]
for n in range(N-1):
Aold=listoffactorsnew[n+1]
Anew=Factorupdateproblem(tl.tensor(X),Gnew,Aold,listoffactorsnew,alpha,theta,n+1,max_iter,epsilon)
listoffactorsnew[n+1]=Anew
error=np.power(tl.norm(tl.tensor(X)-Tensor_matrixproduct(Gnew,listoffactorsnew),2),2)#+alpha*theta*T.norm(Gnew,1)+alpha*(1-theta)*np.sum(Operations_listmatrices(listoffactorsnew[1:N],"NormII"))
errorlist.append(error)
listoffactorsresult=listoffactorsold
Gresult=Gnew
if(np.sqrt(error)/tl.norm(tl.tensor(X),2)<epsilon):
listoffactorsresult=listoffactorsold
Gresult=Gold
errorlist=errorlist[0:len(errorlist)-1]
break
#print(errorlist)
if(Reprojectornot==True):
return Gresult,listoffactorsresult,errorlist,nbiter
if(Reprojectornot==False):
return listoffactorsresult,errorlist,nbiter
def Sparse_codingSingle(args):
#This function is used to infer the activation coefficients for a single tensor
#All the parameters are of tensor type
G_result,error_list,nb_iter=Sparse_code(args[0][args[9]],args[1][args[9]],args[2],args[3],args[4],args[5],args[6],args[7],args[8])
return G_result
def Sparse_coding(X,G,listoffactors,Nonnegative,Setting,step,max_iter,alpha,theta,epsilon,pool):#The parameters are tensors
    #This function is used to infer the activation coefficients for either a single tensor or a sequence of tensors
#All the tensors are of tensor type
if(Setting=="Single"):
G_result,error_list,nb_iter=Sparse_code(X,G,listoffactors,Nonnegative,step,max_iter,alpha,theta,epsilon)
return G_result
if(Setting=="MiniBatch"):
L=len(X)
Gtemp=pool.map(Sparse_codingSingle,[[X,G,listoffactors,Nonnegative,step,max_iter,alpha,theta,epsilon,l] for l in range(L)])
G_result=[]
for Goutput in Gtemp:
G_result.append(Goutput)
return G_result
def Compute_test_error(X_test,G_init,listoffactors,Nonnegative,Setting,step,max_iter,alpha,theta,epsilon,pool):#The parameters are tensors
G_test=Sparse_coding(X_test,G_init,listoffactors,Nonnegative,Setting,step,max_iter,alpha,theta,epsilon,pool)
Test_error=Error(X_test,G_test,listoffactors,Setting,pool)
return Test_error
def CheckSubspace(listoffactors1,listoffactors2):
N=len(listoffactors1)
result=np.zeros(N)
for n in range(N):
Q1=listoffactors1[n]
Q2=listoffactors2[n]
result[n]=np.linalg.norm(np.dot(Q1,Q1.T)-np.dot(Q2,Q2.T))
return result
def Mean_relative_errorsingle(args):
error=np.power(tl.norm(args[0][args[3]]-Tensor_matrixproduct(args[1][args[3]],args[2]),2),2)
error=error/np.power(tl.norm(args[0][args[3]],2),2)
return error
def Mean_relative_error(X,G,listoffactors,setting,pool):
if(setting=="Single"):
return np.power(tl.norm(X-Tensor_matrixproduct(G,listoffactors),2),2)/np.power(tl.norm(X,2),2)
if(setting=="MiniBatch"):
Mean_errorslist=pool.map(Mean_relative_errorsingle,[[X,G,listoffactors,l] for l in range(len(X))])
return np.mean(np.array(Mean_errorslist))
def Split_into_two_subsets(X_set,trainratio):
nbtrainsamples=int(trainratio*len(X_set))
Xtrainset=X_set[0:nbtrainsamples]
Xtestset=X_set[nbtrainsamples:len(X_set)]
return Xtrainset,Xtestset
def ExperimentToyGeneral(etavalues,Nonnegative,Numberofexamples,Minibatchsize,max_iter,step,alpha,theta,nbepochs,randomarray,trainratio,period,pool):
L=len(etavalues)
Nbmean=len(randomarray)
RMSEalto=np.zeros(L)
StdaltoRMSE=np.zeros(L)
Fittingalto=np.zeros(L)
MREalto=np.zeros(L)
Stdaltofitting=np.zeros(L)
Stdaltomre=np.zeros(L)
for l in range(L):
eta=etavalues[l]
Stdaltormselist=[]
Stdaltofittlist=[]
Stdaltomrelist=[]
#for m in range(Nbmean):
for k in range(len(randomarray)):
m=randomarray[k]
print("The noise number is")
print(k+1)
X_set=np.maximum(GenerateTensorsGeneral(Numberofexamples,eta,m),0)
Xtrain_set,Xtest_set=Split_into_two_subsets(X_set,trainratio)
Xtrain=np.zeros((len(Xtrain_set),30,40,50))
Xtest=np.zeros((len(Xtest_set),30,40,50))
for t in range(len(Xtrain_set)):
Xtrain[t,:,:,:]=Xtrain_set[t]
for t in range(len(Xtest_set)):
Xtest[t,:,:,:]=Xtest_set[t]
Xtest_set=Operations_listmatrices(Xtest_set,"Tensorize")
#sigma=1/2
#Noise=np.random.normal(loc=0,scale=sigma,size=(len(Xtrain_set),30,40,50))
#Noise=np.random.rand(len(Xtrain_set),30,40,50)
epsilon=np.power(10,-15,dtype=float)
Coretensorsize=np.array([len(Xtrain_set),eta,eta,eta])# The first dimensions must be equal for mathematical coherence purpose
Ginittrain=np.random.normal(loc=0,scale=1/10,size=([len(Xtrain_set),eta,eta,eta])) #np.random.normal(loc=0,scale=1/100,size=Coretensorsize)
Ginittest=np.random.normal(loc=0,scale=1/10,size=(len(Xtest_set),eta,eta,eta))
Pre_existingG_settrain=[]
for n in range(len(Xtrain_set)):
Pre_existingG_settrain.append(Ginittrain[n,:,:,:])
Pre_existingG_settest=[]
for n in range(len(Xtest_set)):
Pre_existingG_settest.append(Ginittest[n,:,:,:])
Ltest=len(Xtest_set)
Pre_existingfactors=[np.random.normal(loc=0,scale=1/100,size=(30,eta)),np.random.normal(loc=0,scale=1/100,size=(40,eta)),np.random.normal(loc=0,scale=1/100,size=(50,eta))]
K=10 #5
Coretensorsize=np.array([eta,eta,eta])
Gresult4,listoffactorsresult4=ALTO_setWithpredefinedEpochs(Xtrain_set,Coretensorsize,Pre_existingfactors,K,pool,1,nbepochs)
Gtest4=Sparse_coding(Xtest_set,Operations_listmatrices(Pre_existingG_settest,"Tensorize"),listoffactorsresult4,Nonnegative,"MiniBatch",step,max_iter,alpha,theta,epsilon,pool)
error4=Error(Xtest_set,Gtest4,listoffactorsresult4,"MiniBatch",pool)
fittingerror4=error4/Ltest
rmse4=np.sqrt(error4/Ltest)
mreminialto=Mean_relative_error(Xtest_set,Gtest4,Operations_listmatrices(listoffactorsresult4,"Tensorize"),"MiniBatch",pool)
RMSEalto[l]= RMSEalto[l]+rmse4
Fittingalto[l]=Fittingalto[l]+fittingerror4
MREalto[l]=MREalto[l]+mreminialto
Stdaltormselist.append(rmse4)
Stdaltofittlist.append(fittingerror4)
Stdaltomrelist.append(mreminialto)
print("The value of eta is")
print(eta)
RMSEalto[l]=RMSEalto[l]/Nbmean
print("The root mean sqaure errors RMSEs are")
print(RMSEalto[l])
StdaltoRMSE[l]=np.std(np.array(Stdaltormselist))
print("The standard deviations associated to the RMSEs are")
print(StdaltoRMSE[l])
Fittingalto[l]=Fittingalto[l]/Nbmean
print("The fitting errors FEs are")
print(Fittingalto[l])
Stdaltofitting[l]=np.std(np.array(Stdaltofittlist))
print("The standard deviations associated to the FEs are")
print(Stdaltofitting[l])
MREalto[l]=MREalto[l]/Nbmean
print("The mean relative errors MRE are")
print(MREalto[l])
Stdaltomre[l]=np.std(Stdaltomrelist)
print("The standard deviation associated to the MREs are")
print(Stdaltomre[l])
pdb.set_trace()
return RMSEalto,StdaltoRMSE,Fittingalto,Stdaltofitting,MREalto,Stdaltomre
etavalues=[5]#10,15,20,25]
nbepochs=1
Nonnegative=False
step=np.power(10,-5,dtype=float) #np.power(10,-8,dtype=float)
max_iter=20
Numberofexamples=12
Minibatchsize=[]
alpha=np.power(10,2,dtype=float)
theta=np.power(10,-2,dtype=float)
period=int(max_iter/3)
randomarray=[1,2,3]
trainratio=1/3
pool=Pool(5) #,initializer=MethodsTSPen.pool_init,maxtasksperchild=100)
RMSEalto,StdaltoRMSE,Fittingalto,Stdaltofitting,MREalto,Stdaltomre=ExperimentToyGeneral(etavalues,Nonnegative,Numberofexamples,Minibatchsize,max_iter,step,alpha,theta,nbepochs,randomarray,trainratio,period,pool)
| null |
Toyparametersmeasurement/ParametersmeasurementALTO.py
|
ParametersmeasurementALTO.py
|
py
| 18,924 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "tensorly.set_backend",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "numpy.sign",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "numpy.copy",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "tensorly.tensor",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "tensorly.tensor",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.identity",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "tensorly.base.unfold",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "tensorly.base.unfold",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial.distance.cosine",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "scipy.spatial",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.qr",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.qr",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "tensorly.tenalg.mode_dot",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "tensorly.tenalg",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "tensorly.tensor",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend.to_numpy",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "tensorly.backend.mxnet_backend",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "tensorly.tensor",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "tensorly.norm",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "MiscellaneousFunctions.MethodsTSPen.GenerateTensorsGeneral",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 429,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "MiscellaneousFunctions.ALTO.ALTO_setWithpredefinedEpochs",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "pdb.set_trace",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 532,
"usage_type": "call"
}
] |
343703098
|
from damselfly.models import cnn1d_complex
from damselfly.data import loaders, augmentation
from damselfly.utils import train
from pathlib import Path
import torch.nn as nn
import torch
import numpy as np
#import argparse
#parser = argparse.ArgumentParser()
#parser.add_argument('--epoch', dest='epoch', action='store', default=1, type=int)
#parser.add_argument('--batchsize', dest='batchsize', action='store', default=500, type=int)
#parser.add_argument('--lr', dest='lr', action='store', default=1e-3, type=float)
#parser.add_argument('--pitch-min', dest='pitch_min', action='store', type=float)
#parser.add_argument('--pitch-max', dest='pitch_max', action='store', type=float)
#parser.add_argument('--energy-min', dest='energy_min', action='store', default=18575, type=int)
#parser.add_argument('--energy-max', dest='energy_max', action='store', default=18580, type=int)
#parser.add_argument('--radius-min', dest='radius_min', action='store', default=0.005, type=int)
#parser.add_argument('--radius-max', dest='radius_max', action='store', default=0.005, type=int)
#parser.add_argument('--train-data', dest='train_data', action='store',)
#parser.add_argument('--test-data', dest='test_data', action='store',)
#parser.add_argument('--name', dest='name', action='store',)
# model configuration
#parser.add_argument('--kernels', nargs='+', dest='kernels', action='store', type=int)
#parser.add_argument('--channels', nargs='+', dest='channels', action='store', type=int)
#parser.add_argument('--strides', nargs='+', dest='strides', action='store', type=int)
#parser.add_argument('--linear', nargs='+', dest='linear', action='store', type=int)
#args = parser.parse_args()
checkpoint_path = Path.home()/'group'/'project'/'scripting'/'output'/\
'220825_test_cnn1d_complex_training'
checkpoint_path.mkdir(exist_ok=True, parents=True)
checkpoint_name = 'run0.tar'
datapath = Path.home()/'group'/'project'/'datasets'/'data'/\
'220609_dl_test_data_85to88deg_18575to18580ev_5mm_random.h5'
conv_dict = {
'channels':[1,10,15,25,35],
'kernels':[4,4,2,2],
'strides':[4,4,2,2],
'act': cnn1d_complex.ComplexLeakyRelu
}
linear_dict = {
'sizes': [cnn1d_complex.output_size(conv_dict, 8192),1024,256,2],
'act': cnn1d_complex.ComplexLeakyRelu,
}
model_args = (conv_dict, linear_dict)
model = cnn1d_complex.ComplexCNN(*model_args)
lr = 1e-3
opt_args = {
'args':model.parameters(),
'kwargs':{'lr':lr}
}
optimizer = torch.optim.Adam(opt_args['args'], **(opt_args['kwargs']))
loss_fcn = nn.CrossEntropyLoss()
print('Loading Data')
train_data, val_data = loaders.LoadH5ParamRange(
path=datapath,
target_energy_range=(18575, 18580),
target_pitch_range=(86.7, 88.0),
target_radius_range=(0.005, 0.005),
val_split=True,
val_ratio=0.2,
randomize_phase=True,
copies=2,
)
train_data = train_data.unsqueeze(dim=1)
val_data = val_data.unsqueeze(dim=1)
train_data = augmentation.FFT(train_data)
val_data = augmentation.FFT(val_data)
norm_train_data = augmentation.NormBatchComplex(train_data)
#fig = plt.figure()
#ax = fig.add_subplot(1,1,1)
#ax.plot(abs(norm_train_data[0, 0, :].numpy()))
#plt.savefig('test0')
train_data = torch.cat(
(train_data, torch.zeros(train_data.shape, dtype=torch.cfloat)), dim=0
)
val_data = torch.cat(
(val_data, torch.zeros(val_data.shape, dtype=torch.cfloat)), dim=0
)
train_labels = torch.zeros(train_data.shape[0], dtype=torch.long)
train_labels[0:train_data.shape[0]//2] = 1
val_labels = torch.zeros(val_data.shape[0], dtype=torch.long)
val_labels[0:val_data.shape[0]//2] = 1
train_data = (train_data, train_labels)
val_data = (val_data, val_labels)
config = {
'batchsize': 2500,
'epochs': 250,
'checkpoint_epochs': 25,
'checkpoint': checkpoint_path/checkpoint_name,
'initial_epoch': 0,
'loss': [],
'acc': [],
'val_acc': [],
'model_args': model_args,
'opt_args': opt_args,
}
noise_var = 1.38e-23*10*50*60*205e6/8192
print('Training starting')
train.TrainModel(0, model, optimizer, loss_fcn,\
train_data, val_data, config, noise_gen=augmentation.AddNoiseComplex,\
noise_gen_args=[noise_var], data_norm=augmentation.NormBatchComplex
)
| null |
roar/job/deep_filter/220818_train_cnn1d_complex_model_interactive_v1.py
|
220818_train_cnn1d_complex_model_interactive_v1.py
|
py
| 4,172 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path.home",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "damselfly.models.cnn1d_complex.ComplexLeakyRelu",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "damselfly.models.cnn1d_complex",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "damselfly.models.cnn1d_complex.output_size",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "damselfly.models.cnn1d_complex",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "damselfly.models.cnn1d_complex.ComplexLeakyRelu",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "damselfly.models.cnn1d_complex",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "damselfly.models.cnn1d_complex.ComplexCNN",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "damselfly.models.cnn1d_complex",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "damselfly.data.loaders.LoadH5ParamRange",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "damselfly.data.loaders",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "damselfly.data.augmentation.FFT",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "damselfly.data.augmentation",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "damselfly.data.augmentation.FFT",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "damselfly.data.augmentation",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "damselfly.data.augmentation.NormBatchComplex",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "damselfly.data.augmentation",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.cfloat",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.cfloat",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "damselfly.utils.train.TrainModel",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "damselfly.utils.train",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "damselfly.data.augmentation.AddNoiseComplex",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "damselfly.data.augmentation",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "damselfly.data.augmentation.NormBatchComplex",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "damselfly.data.augmentation",
"line_number": 124,
"usage_type": "name"
}
] |
80886343
|
import requests
import time
import re
#Remove tags from an HTML document.
#Actual behavior: delete every occurrence of word from htmltext
def remove(htmltext, word):
text = htmltext[:]
while text.find(word) != -1:
a = text.find(word)
text = text[:a]+text[a+len(word):]
return text
#Scrape the data
def catchData(word):
#Declare request headers to masquerade as a browser
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
}
#The URL we want to request
newUrl = "https://hanyu.baidu.com/s?wd="+word+"+意思&from=zici"
#Send the request and get the response
response = requests.get(newUrl, headers=headers)
#Get the HTML page
html = response.content.decode("utf-8")
#If the query is a single character:
if len(word) == 1:
#Match the definition:
r = re.search(
"\<div class\=\"tab\-content\"\>.*?\<dl\>.*?\<dd\>(.*?)<\/dd\>", html, re.DOTALL)
#Otherwise
else:
#Use an alternative pattern
r = re.search(
"\<div class\=\"tab\-content\"\>.*?\<dt class\=\"pinyin\"\>.*?\<dd\>(.*?)<\/dd\>", html, re.DOTALL)
#If a result was found:
if r:
#Get the target content
t = (r.group(1))
#Remove redundant tags
t = remove(t, '<p>')
t = remove(t, '</p>')
t = remove(t, '<span>')
t = remove(t, '</span>')
#Filter out whitespace
t = list(t)
n = ""
for i in t:
if i not in (" ", "\n"):
n += i
#If no matching content was found
else:
#No relevant information
n = "no file"
#Return the query result
return n
f = input("请输入要查找的字词(中间请用空格隔开):")
wordList = f.split(" ")
ysList = []
for i in wordList:
#Look up each word
ysList.append(catchData(i))
time.sleep(0.5)
#Convert the results to text
n = ""
for i, j in zip(wordList, ysList):
n += i+":"+j+"\n\n"
print(n)
#Write the output to a file
f = open('out.txt', 'w')
f.write(n)
f.close()
| null |
第一节/程序/语文作业预习查词器.py
|
语文作业预习查词器.py
|
py
| 2,026 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 68,
"usage_type": "call"
}
] |
53980940
|
'''
A script that calculates how many days a stock takes to fill (recover) its dividend gap within the year;
if the dividend is not filled within the year, -1 is returned
'''
import os
import json
import argparse
import numpy as np
import pandas as pd
import datetime
from src.Loader import get_data
with open("./config.json") as f:
config = json.load(f)
def _filter_data(df):
if not df[~df['Ex_dividend_transaction_day'].isnull()].empty:
index = df[~df['Ex_dividend_transaction_day'].isnull()].index[0] - 1
tmp_df = df[df.index>=index]
else:
tmp_df = pd.DataFrame(columns=df.columns)
return tmp_df
def _cal_fill_div_day(price_list):
original_price = price_list[0]
for i, s in enumerate(price_list[1:]):
if s >= original_price:
return i
return -1
def cal_stock_fill_div_days(stock_id):
div_data = get_data(
dataset_name='TaiwanStockStockDividend', stock_id=[stock_id])
price_data = get_data(
dataset_name='TaiwanStockPrice', stock_id=[stock_id])
if not (price_data.empty or div_data.empty):
div_data['year'] = div_data['date'].map(lambda x: x.split('-')[0])
price_data['year'] = price_data['date'].map(lambda x: x.split('-')[0])
mge = pd.merge(
price_data[['date', 'year','stock_id', 'close']],
div_data[['stock_id','Ex_dividend_transaction_day']],
left_on=['date', 'stock_id'], right_on=['Ex_dividend_transaction_day', 'stock_id'], how='left')
mge_gby = mge.groupby(['stock_id', 'year'], as_index=False).apply(_filter_data).reset_index(drop=True)
mge_gby = mge_gby.groupby(['stock_id', 'year'], as_index=False).agg({'close': lambda x: list(x)})
mge_gby['fill_days'] = mge_gby['close'].map(lambda x: _cal_fill_div_day(x))
return mge_gby[['stock_id', 'year', 'fill_days']]
else:
return pd.DataFrame(columns=['stock_id', 'year', 'fill_days'])
def main(args):
result = []
for stock_id in args.stock_id_list:
print('Process stock:{}'.format(stock_id))
stock_fill_day = cal_stock_fill_div_days(stock_id)
result.append(stock_fill_day)
result = pd.concat(result, axis=0)
result.to_csv(os.path.join(config['DirPath']['ReportData'], 'stock_fill_div_days.csv'), index=False)
if __name__ == '__main__':
stock_id_list = ['2002', '2891', '2881', '2884', '2882', '2887', '2886', '5880', '2834']
parser = argparse.ArgumentParser()
parser.add_argument(
"--stock_id_list",
type=str,
default=stock_id_list,
help="A list of stock id to calculate fill div days"
)
main(parser.parse_args())
| null |
Code/FinMining/fill_div_days.py
|
fill_div_days.py
|
py
| 2,623 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "src.Loader.get_data",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "src.Loader.get_data",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 68,
"usage_type": "call"
}
] |
124146990
|
import glob
import os
import pathlib2
import argparse
import subprocess as sp
__dir__ = pathlib2.Path(__file__).parent
def main(path: pathlib2.Path):
target_dir = path.parent
toc_py = path / 'toc.py'
gen_md_py = path / 'generate_readme.py'
if toc_py.exists():
print("custom toc.py exists")
else:
toc_py = __dir__ / 'toc.py'
if gen_md_py.exists():
print("custom generate_readme.py exists")
else:
gen_md_py = __dir__ / 'generate_readme.py'
try:
sp.run(f"python {str(toc_py)} --input {str(target_dir)} > {str(path / 'toc.txt')}", shell=True)
except Exception as e:
print(e)
try:
sp.run(
f"python {str(gen_md_py)} --input {str(path / 'toc.txt')} --template_directory {str(path)} > "
f"{target_dir / 'README.md'}",
shell=True)
except Exception as e:
print(e)
os.remove(str(path / 'toc.txt'))
if __name__ == '__main__':
parser = argparse.ArgumentParser("md-toc parser")
parser.add_argument("--custom_dir", default=".md-toc", help="custom md-toc directory")
args = parser.parse_args()
print("Input Parameters")
for k, v in args.__dict__.items():
print(f"{k}: {v}")
root = pathlib2.Path(__file__).parent
print(f"INFO: root: {root}")
md_toc_paths = glob.glob(f"**/{args.custom_dir}", recursive=True)
md_toc_paths = [root / path for path in md_toc_paths]
if len(md_toc_paths) == 0:
print(f"WARNING: No {args.custom_dir} Found")
for path in md_toc_paths:
assert path.exists()
print(f"INFO: Current Path: {path}")
main(path)
| null |
main.py
|
main.py
|
py
| 1,651 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib2.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib2.Path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pathlib2.Path",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 46,
"usage_type": "call"
}
] |
614127568
|
import time
from webbot import Browser
from selenium.common.exceptions import NoSuchElementException
#%%
#open a specific group hangout
def open_group_hangout(web, groupName, waitTime1):
#get out of any iframes
web.driver.switch_to.default_content()
#get into conversations iframe
iframe_pls=web.driver.find_elements_by_xpath("//iframe[@aria-label='Contacts and conversations']")
iframe_id=iframe_pls[0].get_attribute("id")
iframe_correct=web.driver.find_element_by_id(iframe_id)
time.sleep(waitTime1)
web.driver.switch_to.frame(iframe_correct)
#open specific group by groupName
web.driver.find_element_by_css_selector("[title*='"+groupName+"']").click()
time.sleep(waitTime1)
#get out of conversations iframe
web.driver.switch_to.default_content()
return web
#%%
#start a call for a group hangout (that has already been opened)
def call_group_hangout(web, groupName, waitTime1):
#get out of any iframes
web.driver.switch_to.default_content()
#get into iframe
iframe_pls=web.driver.find_elements_by_xpath("//iframe[@aria-label='" +groupName+ "']")
iframe_id=iframe_pls[0].get_attribute("id")
iframe_correct=web.driver.find_element_by_id(iframe_id)
time.sleep(waitTime1)
web.driver.switch_to.frame(iframe_correct)
#click video call
web.driver.find_element_by_css_selector("[title*='Video call. Click to start a video call.']").click()
time.sleep(waitTime1)
#get out of groupName specific hangout iframe
web.driver.switch_to.default_content()
return web
#%%
#exit an already open group hangout
def exit_group_hangout(web, groupName, waitTime1):
#get out of any iframes
web.driver.switch_to.default_content()
#get into iframe
iframe_pls=web.driver.find_elements_by_xpath("//iframe[@aria-label='" +groupName+ "']")
iframe_id=iframe_pls[0].get_attribute("id")
iframe_correct=web.driver.find_element_by_id(iframe_id)
time.sleep(waitTime1)
web.driver.switch_to.frame(iframe_correct)
#click to exit the specific hangout iframe
web.driver.find_element_by_xpath("//button[@class='gGnOIc tV qp SD p7oPo JPiKic']").click()
#get out of specific group hangout iframe
web.driver.switch_to.default_content()
return web
#%%
#write in an already open group hangout
#message is a string
def write_in_group_hangout(web, groupName, waitTime1, message):
#get out of any iframes
web.driver.switch_to.default_content()
# #get into iframe
iframe_pls=web.driver.find_elements_by_xpath("//iframe[@aria-label='" +groupName+ "']")
iframe_id=iframe_pls[0].get_attribute("id")
iframe_correct=web.driver.find_element_by_id(iframe_id)
time.sleep(waitTime1)
web.driver.switch_to.frame(iframe_correct)
#click hangout text input box
web.driver.find_element_by_xpath("//div[@class='vE dQ editable']").click()
#type in message string
web.type(message)
time.sleep(waitTime1)
#enter message into hangout groupchat
web.press(web.Key.ENTER)
time.sleep(waitTime1)
#get out of specific group hangout iframe
web.driver.switch_to.default_content()
return web
#%%
#add a person to an existing already open group hangout
def add_to_group_hangout(web, groupName, waitTime1, email):
#get out of any iframes
web.driver.switch_to.default_content()
#get into iframe
iframe_pls=web.driver.find_elements_by_xpath("//iframe[@aria-label='" +groupName+ "']")
iframe_id=iframe_pls[0].get_attribute("id")
iframe_correct=web.driver.find_element_by_id(iframe_id)
time.sleep(waitTime1)
web.driver.switch_to.frame(iframe_correct)
#click hangout people button
web.driver.find_element_by_xpath("//div[@class='dwrYTb PK']").click()
#click Add people button
web.click('Add people')
#type the email string
web.type(email)
time.sleep(waitTime1*2)
#click the email to add
try:
#for gmail addresses, click the gmail entry
element=web.driver.find_element_by_xpath("//li[@class='eh XcEgrf fp pu hy']").click()
print(email + " added")
except NoSuchElementException:
#for non-gmail addresses, click the non-gmail entry
print("No element found. Trying again...")
element=web.driver.find_element_by_xpath("//li[@class='eh XcEgrf fp pu hy c-P-p lebsnd Tb']").click()
time.sleep(waitTime1)
#click Add people button to finish adding
web.click('Add people')
#get out of specific group hangout iframe
web.driver.switch_to.default_content()
return web
#%%
def get_call_url(web,groupName,waitTime1):
#get into iframe
iframe_pls=web.driver.find_elements_by_xpath("//iframe[@aria-label='" +groupName+ "']")
iframe_id=iframe_pls[0].get_attribute("id")
iframe_correct=web.driver.find_element_by_id(iframe_id)
time.sleep(waitTime1)
web.driver.switch_to.frame(iframe_correct)
#click video call
web.driver.find_element_by_css_selector("button[title='Video call. Click to start a video call.']").click()
#switch to video call
web.driver.switch_to.window(web.driver.window_handles[1])
#get call url
call_url=web.get_current_url()
#exit call
element = web.driver.find_elements_by_css_selector("span[class='DPvwYc']")[2]
web.driver.execute_script("arguments[0].click();", element)
#switch back to original window
web.driver.switch_to.window(web.driver.window_handles[0])
#get back out of iframe
web.driver.switch_to.default_content()
return web, call_url
| null |
Old Files/VirtualVisitas-FirstMultiThreadingUsed/hangout_tools.py
|
hangout_tools.py
|
py
| 5,629 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 141,
"usage_type": "call"
}
] |
619140919
|
from south.db import db
from django.db import models
from noc.core.model.fields import PickledField
class Migration:
def forwards(self):
# Model 'ChangesQuarantine'
db.create_table('main_changesquarantine', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('timestamp', models.DateTimeField("Timestamp",auto_now_add=True)),
('changes_type', models.CharField("Type",max_length=64)),
('subject', models.CharField("Subject",max_length=256)),
('data', PickledField("Data")),
))
db.create_table('main_changesquarantinerule', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('name', models.CharField("Name",max_length=64,unique=True)),
('is_active', models.BooleanField("Is Active",default=True)),
('changes_type', models.CharField("Type",max_length=64)),
('subject_re', models.CharField("Subject",max_length=256)),
('action', models.CharField("Action",max_length=1,choices=[("I","Ignore"),("A","Accept"),("Q","Quarantine")])),
('description', models.TextField("Description",null=True,blank=True)),
))
db.send_create_signal('main', ['ChangesQuarantine','ChangesQuarantineRule'])
def backwards(self):
db.delete_table('main_changesquarantine')
db.delete_table('main_changesquarantinerule')
| null |
main/migrations/0024_changes_quarantine.py
|
0024_changes_quarantine.py
|
py
| 1,468 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "south.db.db.create_table",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "noc.core.model.fields.PickledField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "south.db.db.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "south.db.db.send_create_signal",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "south.db.db.delete_table",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "south.db.db.delete_table",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 34,
"usage_type": "name"
}
] |
133965056
|
import xml.etree.ElementTree as ET
from broadsoft.requestobjects.datatypes.AccessDevice import AccessDevice
class AccessDeviceEndpoint:
def __init__(self, device_name='Generic', line_port=None, contact=None, device_level='Group'):
self.contact = contact
self.device_level = device_level
self.device_name = device_name
self.line_port = line_port
def to_xml(self):
ade = ET.Element('accessDeviceEndpoint')
if self.device_name:
ad = AccessDevice(device_name=self.device_name, device_level=self.device_level)
ade.append(ad.to_xml())
if self.line_port:
lp = ET.SubElement(ade, 'linePort')
lp.text = self.line_port
if self.contact:
c = ET.SubElement(ade, 'contact')
c.text = self.contact
return ade
| null |
broadsoft/requestobjects/datatypes/AccessDeviceEndpoint.py
|
AccessDeviceEndpoint.py
|
py
| 851 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "broadsoft.requestobjects.datatypes.AccessDevice.AccessDevice",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 24,
"usage_type": "name"
}
] |
472190743
|
import torch
import torch.nn as nn
import torch.optim as O
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
import torchvision.transforms as transforms
import time
import copy
import numpy as np
import pandas as pd
import os
from shutil import copyfile
import OCR_model as ocr
# Function to load images for pytorch
def load_images(path):
data_path = path
# Define the transforms to be done
transformations=transforms.Compose([transforms.Resize(img_size),transforms.Grayscale(num_output_channels=1),transforms.ToTensor()])
train_dataset = datasets.ImageFolder(
root=data_path,
transform=transformations)
global class_map
class_map=train_dataset.class_to_idx
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=0,
shuffle=True)
return train_loader
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for batch in dataloaders[phase]:
inputs,labels=batch
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
# load best model weights
model.load_state_dict(best_model_wts)
# save model
torch.save(model.state_dict(), './models_ocr/model_best')
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
# save model
torch.save(model.state_dict(), './models_ocr/model_best')
return model, val_acc_history
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = img_size
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = img_size
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = img_size
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = img_size
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = img_size
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
# Handle the auxilary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = img_size
else:
print("Invalid model name, exiting...")
exit()
return model_ft, input_size
model_name = "resnet"
# Number of classes in the dataset
num_classes = 10
# Batch size for training (change depending on how much memory you have)
batch_size = 32
# Number of epochs to train for
num_epochs = 100
# Flag for feature extracting. When False, we finetune the whole model,
# when True we only update the reshaped layer params
feature_extract = False
# Set image input size
img_size=(32,32)
# Store the class mapping
class_map={}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: load_images('../data/mnist/'+x+'/') for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(class_map)
class_map = {v: k for k, v in class_map.items()}
class_map2=class_map
print(class_map)
# Initialize the model for this run
# model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
model_ft = ocr.LeNet5()
# Print the model we just instantiated
print(model_ft)
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Observe that all parameters are being optimized
optimizer_ft = O.Adam(params_to_update, lr=0.001)
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, image_datasets, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=(model_name=="inception"))
# # Initialize the model for this run
# net, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
# net.load_state_dict(torch.load('./model'))
# net.eval()
# testset=load_images('data/test')
# # class_map2 = {v: k for k, v in class_map2.items()}
# print(class_map2)
# df = pd.DataFrame(columns=['image_id', 'category'])
# # Now perform the prediction
# for batch in testset:
# (data,_),(img_path,_)=batch
# prediction=net(data)
# _,prediction=torch.max(prediction.data,1)
# df_temp=pd.DataFrame()
# img_path=list(img_path)
# img_path=[im[im.rfind('/')+1:im.rfind('.')] for im in img_path]
# df_temp['image_id']=img_path
# df_temp['category']=prediction.numpy()
# df=df.append(df_temp,ignore_index=True)
# df['category'].replace(class_map2, inplace=True)
# # Sort the dataframe
# df.sort_values(by='image_id',inplace=True)
# df.to_csv('submission.csv',index=False)
# files.download('submission.csv')
| null |
Training_OCR.py
|
Training_OCR.py
|
py
| 10,432 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torchvision.transforms.Compose",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Grayscale",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.set_grad_enabled",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torchvision.models.resnet18",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "torchvision.models.alexnet",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "torchvision.models.vgg11_bn",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "torchvision.models.squeezenet1_0",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "torchvision.models.densenet121",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "torchvision.models.inception_v3",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "OCR_model.LeNet5",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 273,
"usage_type": "name"
}
] |
399510628
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/16
# @Author : RookieDay
# @Site :
# @File : neo4j_test
# @Github : https://github.com/rookieday
# @Software: PyCharm Community Edition
# refer http://py2neo.org/2.0/
# by http://python.jobbole.com/84190/
from py2neo import Graph,Node,Relationship
test_graph = Graph(
"http://localhost:7474",
username="neo4j",
password="123456"
)
test_node_1 = Node(label = "Person",name = "test_node_1")
test_node_2 = Node(label = "Person",name = "test_node_2")
test_graph.create(test_node_1)
test_graph.create(test_node_2)
node_1_call_node_2 = Relationship(test_node_1,'CALL',test_node_2)
node_1_call_node_2['count'] = 1
node_2_call_node_1 = Relationship(test_node_2,'CALL',test_node_1)
node_2_call_node_1['count'] = 2
test_graph.create(node_1_call_node_2)
test_graph.create(node_2_call_node_1)
node_1_call_node_2['count']+=1
test_graph.push(node_1_call_node_2)
find_code_1 = test_graph.find_one(
label="Person",
property_key="name",
property_value="test_node_1"
)
print (find_code_1)
| null |
spider_ways/neo4j_test.py
|
neo4j_test.py
|
py
| 1,073 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "py2neo.Graph",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "py2neo.Node",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "py2neo.Node",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "py2neo.Relationship",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "py2neo.Relationship",
"line_number": 28,
"usage_type": "call"
}
] |
9436414
|
from collections import defaultdict
from operator import itemgetter, attrgetter, methodcaller
from sec30 import *
freq = defaultdict(int)
for li in lists:
for morph in li:
freq[morph["surface"]] += 1
ans = sorted(freq.items(), key=lambda t:(t[1],t[0]),reverse=True)
for li in ans:
print(li[0]+"\t"+str(li[1]))
| null |
sec36.py
|
sec36.py
|
py
| 332 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
}
] |
478836492
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import NoReverseMatch, reverse
from django.utils.translation import ugettext_lazy as _
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from cms.menu_bases import CMSAttachMenu
class IconNavigationNode(NavigationNode):
def __init__(self, *args, **kwargs):
icon = kwargs.get('icon', None)
if icon:
kwargs.pop('icon')
super(IconNavigationNode, self).__init__(*args, **kwargs)
self.icon = icon
class MemberareaMenu(CMSAttachMenu):
"""Menu for the member area."""
name = _(u'Member area menu')
def get_nodes(self, request):
try:
reverse('memberarea_overview')
except NoReverseMatch:
return []
nodes = [
IconNavigationNode(_(u'Dashboard'),
reverse('memberarea_overview'), 1,
icon='dashboard'),
IconNavigationNode(_(u'Profile'),
reverse('memberarea_profile_edit'), 2,
icon='user'),
IconNavigationNode(_(u'Inventory'),
reverse('memberarea_inventory'), 3,
icon='table'),
]
if request.user.has_perm('memberarea.can_see_documents'):
nodes.append(IconNavigationNode(_(u'Documents'),
reverse('memberarea_documents'), 4,
icon='file'))
if request.user.is_staff:
nodes.append(IconNavigationNode(_(u'Administration'),
reverse('admin:index'), 5,
icon='wrench'))
nodes.append(IconNavigationNode(_(u'Logout'),
reverse('memberarea_logout'), 99,
icon='sign-out'))
return nodes
menu_pool.register_menu(MemberareaMenu)
| null |
kumo/memberarea/menu.py
|
menu.py
|
py
| 2,001 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "menus.base.NavigationNode",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "cms.menu_bases.CMSAttachMenu",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.NoReverseMatch",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "menus.menu_pool.menu_pool.register_menu",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "menus.menu_pool.menu_pool",
"line_number": 57,
"usage_type": "name"
}
] |
408438073
|
from slackbot.bot import respond_to
import requests
from firebaseint import news_api_key
API_KEY = news_api_key()
@respond_to('news from (.*)')
def news_in(message, source):
res = requests.get('https://newsapi.org/v1/articles?source={}&apiKey={}'.format(source, API_KEY)).json()
for word in message.body.get('text').split():
num = 1
if word.isdigit():
num = int(word)
break
if res.get('status') == 'ok':
for i, item in enumerate(res.get('articles')):
if i == num:
break
message.send(u'*{}* ({})'.format(item.get('title'), item.get('url')))
elif res.get('status') == 'error':
message.send("Source does not exist or isn't available")
@respond_to('^news$')
@respond_to('news sources')
def news_sources(message):
res = requests.get('https://newsapi.org/v1/sources').json()
sources_str = ''
for source in res.get('sources'):
sources_str += '*{}*, '.format(source.get('id'))
message.send(sources_str)
| null |
slackbot/plugins/news.py
|
news.py
|
py
| 933 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "firebaseint.news_api_key",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "slackbot.bot.respond_to",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "slackbot.bot.respond_to",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "slackbot.bot.respond_to",
"line_number": 26,
"usage_type": "call"
}
] |
24329900
|
import os
import yaml
import time
import Logger
import Managers
from Monitor import Monitor
config = None
config_filename = 'configuration.yaml'
scriptFolder = str(os.path.dirname(os.path.realpath(__file__)))
def LoadYAML():
global config
with open(os.path.join(scriptFolder, config_filename)) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
def SetupMonitors():
# Setup managers
commandManager = Managers.CommandManager(
config)
sensorManager = Managers.SensorManager(
config)
# Link them
commandManager.SetSensorManager(sensorManager)
sensorManager.SetCommandManager(commandManager)
# If there is no list of monitors, set up a single monitor
if('monitors' not in config):
monitor = Monitor(config, config, commandManager, sensorManager)
else: # More Monitors
# Now setup monitors
monitor_id = 0
for monitor_config in config['monitors']:
monitor_id += 1
monitor = Monitor(monitor_config, config, commandManager,
sensorManager, monitor_id)
# Start sensors loop
sensorManager.Start()
if __name__ == "__main__":
try:
LoadYAML()
SetupMonitors()
except Exception as exc: # Top-level try/except to report information about unhandled exceptions
logger = Logger.Logger(config)
logger.Log(Logger.LOG_ERROR, 'Main',
Logger.ExceptionTracker.TrackString(exc))
logger.Log(Logger.LOG_ERROR, 'Main',
'Try to check your configuration.yaml')
logger.Log(Logger.LOG_ERROR, 'Main',
"If problem persists, check issues (or open a new one) at 'https://github.com/richibrics/PyMonitorMQTT'")
exit(1)
| null |
main.py
|
main.py
|
py
| 1,770 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "Managers.CommandManager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "Managers.SensorManager",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Monitor.Monitor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "Monitor.Monitor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Logger.Logger",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "Logger.LOG_ERROR",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "Logger.ExceptionTracker.TrackString",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "Logger.ExceptionTracker",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "Logger.LOG_ERROR",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "Logger.LOG_ERROR",
"line_number": 56,
"usage_type": "attribute"
}
] |
337901435
|
# -*- coding: utf-8 -*-
import collections
import warnings
import torch
import torch.nn as nn
from common.registry import registry
class Losses(nn.Module):
def __init__(self, loss_list):
super().__init__()
self.losses = []
self._evaluation_predict = registry.get("config").evaluation.predict
for loss in loss_list:
self.losses.append(OCRLoss(loss))
def forward(self, sample_list, model_output, *args, **kwargs):
output = {}
if not hasattr(sample_list, "targets"):
if not self._evaluation_predict:
warnings.warn("Sample list has not field 'targets', are you sure that your ImDB has labels? you may have wanted to run with evaluation.predict=true")
return output
for loss in self.losses:
output.update(loss(sample_list, model_output, *args, **kwargs))
registry_loss_key = "{}.{}.{}".format("losses", sample_list.dataset_name, sample_list.dataset_type)
registry.register(registry_loss_key, output) # Register the losses to registry
return output
class OCRLoss(nn.Module):
def __init__(self, params=None):
super().__init__()
if params is None:
params = {}
self.writer = registry.get("writer")
is_mapping = isinstance(params, collections.abc.MutableMapping)
if is_mapping:
if "type" not in params:
raise ValueError("Parameters to loss must have 'type' field to specify type of loss to instantiate")
else:
loss_name = params["type"]
else:
assert isinstance(params, str), "loss must be a string or dictionary with 'type' key"
loss_name = params
self.name = loss_name
loss_class = registry.get_loss_class(loss_name)
if loss_class is None:
raise ValueError(f"No loss named {loss_name} is registered to registry")
if is_mapping:
loss_params = params.get("params", {})
else:
loss_params = {}
self.loss_criterion = loss_class(**loss_params)
def forward(self, sample_list, model_output, *args, **kwargs):
loss = self.loss_criterion(sample_list, model_output, *args, **kwargs)
if not isinstance(loss, torch.Tensor):
loss = torch.tensor(loss, dtype=torch.float)
if loss.dim() == 0:
loss = loss.view(1)
key = "{}/{}/{}".format(sample_list.dataset_type, sample_list.dataset_name, self.name)
return {key: loss}
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
@registry.register_loss("sequence_cross_entropy_loss")
class SequenceCrossEntropyLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super().__init__()
self.weight = weight
self.size_average = size_average
self.loss_crossentropy = nn.CrossEntropyLoss() # nn.CrossEntropyLoss combines nn.LogSoftmax and nn.NLLLoss
def forward(self, sample_list, model_output):
scores = model_output["scores"][:, :-1] # The first predicted character is not the start token; it is a real token
ann_gt_idx = torch.LongTensor(sample_list["targets"])[:, 1:].to(scores.device) # ann_gt_idx:[B, L]
loss = self.loss_crossentropy(scores.contiguous().view(-1, scores.size(-1)), ann_gt_idx.contiguous().view(-1))
return loss
| null |
modules/losses.py
|
losses.py
|
py
| 3,649 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "common.registry.registry.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "common.registry.registry",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "warnings.warn",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "common.registry.registry.register",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "common.registry.registry",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "common.registry.registry.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "common.registry.registry",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "collections.abc",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "common.registry.registry.get_loss_class",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "common.registry.registry",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "torch.LongTensor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "common.registry.registry.register_loss",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "common.registry.registry",
"line_number": 81,
"usage_type": "name"
}
] |
648702197
|
# -*- coding: utf8 -*-
from flask import request, session, redirect, url_for, render_template, jsonify, current_app, g
import time, random
from werkzeug.utils import secure_filename
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.orm import sessionmaker
from sqlalchemy import desc
from share import app
from share.models import engine, Sharegroup, Member, Applicant, Article, Freeboard, Comment, Groupmemberlist
from groupInfo import GroupInfo
# from pusher import Pusher
# import urllib3.contrib.pyopenssl
# urllib3.contrib.pyopenssl.inject_into_urllib3()
# @app.route('/chat')
# def chat():
# return render_template('chat.html')
#
# @app.route('/send', methods=['GET'])
# def send():
# pusher = Pusher(
# app_id='121839',
# key='d5b71b591d6f4a1c4c34',
# secret='1c68c4df2403de424c9c'
# )
# chat_msg = request.args.get('msg')
# print(chat_msg)
# pusher.trigger('test_channel', 'my_event', {'message': chat_msg})
#
# return ""
@app.before_request
def name_email():
# load session values into g
g.user_name = None
g.user_email = None
if 'logged_in' in session:
g.user_email = session['logged_in']
if 'name' in session:
g.user_name = session['name']
# main page
@app.route('/')
def index():
if 'logged_in' not in session:
return render_template('welcome.html')
Session = sessionmaker(bind=engine)
ses = Session()
# groups I have joined
grouplist = ses.query(Groupmemberlist.group_no).filter(Groupmemberlist.email == g.user_email).all()
notgrouplist = ses.query(Groupmemberlist.group_no).filter(Groupmemberlist.email != g.user_email).all()
# list of groups not joined
showList = list(set(notgrouplist) - set(grouplist))
i = 0
groups = []  # joined groups
while i < len(grouplist):
eachgroup = ses.query(Sharegroup).filter(Sharegroup.no == grouplist[i][0]).one()
article = ses.query(Article).filter(Article.groupno == grouplist[i][0]).count()
member = ses.query(Groupmemberlist).filter(Groupmemberlist.group_no == grouplist[i][0]).count()
content = (eachgroup, article, member)
groups.append(content)
i += 1
j = 0
notgroups = []  # groups not joined
while j < len(showList):
# pass along info for the groups not joined
eachgroup = ses.query(Sharegroup).filter(Sharegroup.no == showList[j][0]).one()
article = ses.query(Article).filter(Article.groupno == showList[j][0]).count()
member = ses.query(Groupmemberlist).filter(Groupmemberlist.group_no == showList[j][0]).count()
content = (eachgroup, article, member)
notgroups.append(content)
j += 1
# pass along the article lists of the joined groups
# grouplist: list of joined groups
articles = []
k = 0
while k < len(grouplist):
g_article_list = ses.query(Article).filter(Article.groupno == grouplist[k][0]).all()
if len(g_article_list) >= 1:  # some groups have no items, hence the >= 1 check
articles.append(g_article_list)
k += 1
return render_template('main.html', groups=groups, notgroups=notgroups, articles=articles)
# -------------------- Group Start ---------------------------------
# group main page
@app.route('/group', methods=['get', 'post'])
def group_list():
Session = sessionmaker(bind=engine)
ses = Session()
# g_type = 1 public, 2 private
keyword = ""
try:
keyword = request.form['keyword']  # try/except because this raises an error when no keyword was entered
except:
pass
# groups not joined
join_list = ses.query(Groupmemberlist.group_no).filter(Groupmemberlist.email==g.user_email).all()
all_list = ses.query(Sharegroup.no).all()
showList = list(set(all_list) - set(join_list))
if keyword == "":
i = 0
totalList = []
while i < len(showList):
object = ses.query(Sharegroup, Member.name).filter(Sharegroup.email == Member.email).filter(Sharegroup.no == showList[i][0]).one()
totalList.append(object)
i += 1
else:
search = []
try:
# exception handling in case no group matches the keyword
search = ses.query(Sharegroup, Member.name).filter(Sharegroup.email == Member.email).\
filter(Sharegroup.name.like('%'+keyword+'%')).all()
except:
pass
ses.close()
return render_template('group_list.html', groups=search)
ses.close()
return render_template('group_list.html', groups=totalList)
# join a group
@app.route('/group/join/<int:no>', methods=['post'])
def group_join(no=None):
Session = sessionmaker(bind=engine)
ses = Session()
gml = Groupmemberlist(request.form['email'], no)
ses.add(gml)
ses.commit()
ses.close()
return redirect(url_for('group_list'))
# create a group
@app.route('/group_submit', methods=['post'])
def group_submit():
Session = sessionmaker(bind=engine)
ses = Session()
name = request.form['name']
if name == "":
# the name field was left empty
return redirect(url_for('group_list'))
try:
# if the group name already exists, redirect back
ses.query(Sharegroup).filter(Sharegroup.name == name).one()
return redirect(url_for('group_list'))
except:
# if there is no duplicate name, .one() raises an exception and we fall through
pass
category = request.form['category']
g_type = request.form['g_type']
g_desc = request.form['groupdesc']
g_pic = request.files['grouppic']
if g_pic.filename == "":
g_picname = "Null"
else:
g_picname = name  # group picture name == group name (group names are unique)
g_pic.save('/home/ec2-user/project/Sharable/share/static/group_pic/'+ name)
add_group = Sharegroup(name, category, int(g_type), g.user_email, g_desc, g_picname)
ses.add(add_group)
ses.commit()
gml_no = ses.query(Sharegroup.no).filter(Sharegroup.name == name).one()
# the creator is automatically registered as a member of the new group
add_gml = Groupmemberlist(g.user_email, gml_no[0])
ses.add(add_gml)
ses.commit()
ses.close()
return redirect(url_for('group_list'))
# group detail page
@app.route('/group/detail/<int:no>', methods=['get'])
def group_detail_page(no):
ginfo = GroupInfo(no)
membercount = ginfo.g_member_count()
memberlist = ginfo.g_member_list()
group_detail = ginfo.sharegroup()
similar_list = ginfo.similar_group()
Session = sessionmaker(bind=engine)
ses = Session()
articlecount = ses.query(Article).filter(Article.groupno == no).count()
detail = [membercount, memberlist, group_detail, similar_list, articlecount]
return render_template("group_detail.html", detail=detail)
# return basic group info
@app.route('/group/<int:no>', methods=['get'])
def group_detail(no):
Session = sessionmaker(bind=engine)
ses = Session()
detail = ses.query(Sharegroup).filter(Sharegroup.no == no).one()
ses.close()
groupinfo = GroupInfo(no)
member_count = groupinfo.g_member_count()  # number of group members
return jsonify(gml_groupno=member_count, share_no=detail.no, share_name=detail.name,
share_category=detail.category, share_email=detail.email, share_groupdesc=detail.groupdesc,
share_grouppic=detail.grouppic)
# check group name duplication
@app.route('/groupname/<gname>', methods=['GET'])
def duplication_check(gname=None):
Session = sessionmaker(bind=engine)
ses = Session()
try:
ses.query(Sharegroup).filter(Sharegroup.name == gname).one()
return "이미 사용중인 그룹 이름입니다."
except:
return "사용가능한 그룹 이름입니다."
finally:
ses.close()
# -------------------- Group End ----------------------------------------
# -------------------- Article Start ------------------------------------
# Load the form of write article
@app.route('/write_article')
def write_article():
Session = sessionmaker(bind=engine)
ses = Session()
# groups the current member has joined
grouplist = ses.query(Groupmemberlist.group_no).filter(Groupmemberlist.email == g.user_email).all()
groups = []
i = 0
while i < len(grouplist):
g_info = GroupInfo(grouplist[i][0])
groups.append(g_info.sharegroup())
i += 1
ses.close()
return render_template("write_article.html", groups=groups)
# Handling the procedure of write article
@app.route('/write_article_submit', methods=["post"])
def write_article_submit():
Session = sessionmaker(bind=engine)
ses = Session()
f1 = request.files['desc_pic1']
user_email = session['logged_in']
title = request.form['title'].encode('utf-8')
desc = request.form['desc'].encode('utf-8')
select_type = 2 # default is direct
select_type = request.form['select_type'] # 1 random / 2 direct
groupno = request.form.getlist('groupno')
print(groupno)
i=0
while i < len(groupno):
if f1.filename == "":
article = Article(title, user_email, desc, "Null", time.strftime('%Y-%m-%d %H:%M'), select_type, groupno[i])
else:
f1_name = secure_filename(f1.filename).encode('utf-8')
print(groupno[i])
article = Article(title, user_email, desc, \
f1_name, time.strftime('%Y-%m-%d %H:%M'), select_type, groupno[i])
f1.save('/home/ec2-user/project/Sharable/share/static/file_pic/' + user_email + secure_filename(f1.filename))
ses.add(article)
ses.commit()
i += 1
ses.close()
return redirect(url_for('show_list'))
# Show article lists
@app.route('/show_list')
def show_list():
Session = sessionmaker(bind=engine)
ses = Session()
articles = ses.query(Article, Sharegroup.name).filter(Article.groupno==Sharegroup.no).order_by(desc(Article.id)).all()
ses.close()
return render_template('show_list.html', articles=articles)
# Show article detail
@app.route('/article_detail', methods=['GET'])
@app.route('/article_detail/<int:article_no>', methods=['GET'])
def article_detail(article_no=None):
Session = sessionmaker(bind=engine)
ses = Session()
detail_content = ses.query(Article, Member.name).filter(Article.id == article_no) \
.filter(Article.email == Member.email).one()
ses.close()
comments = ses.query(Member.name, Comment.comment, Comment.reg_date, Comment.email, Comment.id). \
filter(Comment.article_id == article_no).filter(Member.email == Comment.email).all()
applicants = ses.query(Member.name, Applicant.id, Applicant.success).filter(Article.id == article_no). \
filter(Applicant.email == Member.email).filter(Applicant.article_id == Article.id).all()
return render_template('article_detail.html', detail=detail_content, \
comments=comments, applicants=applicants)
@app.route('/article_delete/<int:article_no>', methods=['GET'])
def article_delete(article_no):
Session = sessionmaker(bind=engine)
ses = Session()
applicant2 = ses.query(Applicant).filter(Applicant.article_id == article_no).first()
if applicant2 is None:
art_del = ses.query(Article).filter(Article.id == article_no).one()
ses.delete(art_del)
ses.commit()
ses.close()
return redirect(url_for('show_list'))
else:
ses.close()
return redirect(url_for('article_detail', article_no=article_no))
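# select a winner for an article: the given applicant when an applicant id is passed,
# otherwise a randomly chosen applicant for the article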
@app.route('/applicant_select/<int:article_id>', methods=['GET','POST'])
@app.route('/applicant_select/<int:applicant>', methods=['POST'])
def applicant_select(applicant=None, article_id=None):
Session = sessionmaker(bind=engine)
ses = Session()
if applicant is not None:
winner = ses.query(Applicant).filter(Applicant.id == applicant).one()
winner.success = 1
else:
applicants = ses.query(Applicant).filter(Applicant.article_id == article_id).all()
var = random.randint(0,len(applicants)-1)
applicants[var].success=1
id = applicants[var].id
email = applicants[var].email
ses.commit()
ses.close()
return jsonify(id=id,
article_id=article_id,
email=email,
success=1)
ses.commit()
ses.close()
return "hello"
@app.route('/applicant_deselect/<int:applicant>', methods=['get'])
def applicant_deselect(applicant=None):
Session = sessionmaker(bind=engine)
ses = Session()
winner = ses.query(Applicant).filter(Applicant.id == applicant).one()
winner.success = 0
ses.commit()
ses.close()
return redirect(request.referrer)
# cancel an application for an item
@app.route('/applicant_delete/<int:applicant_no>', methods=['get'])
def applicant_delete(applicant_no=None):
Session = sessionmaker(bind=engine)
ses = Session()
applicant = ses.query(Applicant).filter(Applicant.id == applicant_no).one()
ses.delete(applicant)
ses.commit()
ses.close()
return redirect(request.referrer)
# apply for an item
@app.route('/applicant_submit', methods=['post'])
def applicant_submit():
article_no = request.form['article_id']
Session = sessionmaker(bind=engine)
ses = Session()
if request.form['email'] == '':
return redirect(url_for('article_detail', article_no=article_no))
try:
ses.query(Applicant).filter(Applicant.email == request.form['email']) \
.filter(Applicant.article_id == article_no).one()
except:
applicant = Applicant(request.form['article_id'], request.form['email'])
ses.add(applicant)
ses.commit()
finally:
ses.close()
return redirect(url_for('article_detail', article_no=article_no))
# delete a comment
@app.route('/delete_comment/<int:comment_no>', methods=['GET'])
def delete_comment(comment_no=None):
Session = sessionmaker(bind=engine)
ses = Session()
comment = ses.query(Comment).filter(Comment.id == comment_no).one()
ses.delete(comment)
ses.commit()
ses.close()
return redirect(request.referrer)
# post a comment
@app.route('/comment_submit', methods=['POST'])
def comment_submit():
Session = sessionmaker(bind=engine)
ses = Session()
article_no = request.form['no']
if request.form['reply'] == "":
pass
elif request.form['email'] == "":
return redirect(url_for('article_detail', article_no=article_no))
else:
reply = request.form['reply'].encode('utf-8')
comment = Comment(reply, time.strftime('%Y-%m-%d %H:%M:%S'),
request.form['email'], request.form['no'])
ses.add(comment)
ses.commit()
ses.close()
return redirect(url_for('article_detail', article_no=article_no))
# -------------------- Article End --------------------------------------
# -------------------- Member Start -------------------------------------
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/mypage/<string:email>')
def mypage(email=None):
Session = sessionmaker(bind=engine)
ses = Session()
member = ses.query(Member).filter(Member.email == email).one()
articles = ses.query(Article, Sharegroup.name).filter(Article.groupno==Sharegroup.no).filter(Article.email == email).all()
group_no = ses.query(Groupmemberlist.group_no).filter(Groupmemberlist.email == email).all()
my_product = ses.query(Article, Applicant.success).filter(Article.id == Applicant.article_id).filter(Applicant.email==email).all()
i = 0
groups = []
while i < len(group_no):
group = ses.query(Sharegroup).filter(Sharegroup.no == group_no[i][0]).one()
groups.append(group)
i += 1
ses.close()
return render_template('mypage.html', member=member, articles=articles, groups=groups, my_product=my_product)
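# log in: look up the member by email, verify the password hash and store the user in the session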
@app.route('/login_submit', methods=['post'])
def login_submit():
Session = sessionmaker(bind=engine)
ses = Session()
session['name'] = None
if request.form['email'] == "":
return redirect(url_for('login'))
elif request.form['pw'] == "":
return redirect(url_for('login'))
else:
email = request.form['email']
member = ses.query(Member).filter(Member.email == email).first()
ses.close()
if member is None:
return redirect(url_for('login'))
else:
# the email exists; now check the password
hashword = check_password_hash(member.pw, request.form['pw'])
if hashword:
# session.permanent = True
session['logged_in'] = request.form['email']
session['name'] = member.name
return redirect(url_for('index'))
else:
return redirect(url_for('login'))
@app.route('/logout')
def logout():
session.pop('logged_in', None)
session.pop('name', None)
return redirect(url_for('index'))
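# sign up: validate the form, reject duplicate emails, hash the password and create the new member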
@app.route('/join', methods=['GET', 'POST'])
def join():
Session = sessionmaker(bind=engine)
ses = Session()
if request.method == 'POST':
print("start")
try:
if request.form['email'] == "":
print(request.form['email'])
return redirect(url_for('join'))
check = ses.query(Member).filter(Member.email == request.form['email']).one()
except:
print("except")
print(request.form['email'])
name = request.form['name']
print(name)
if name == "":
return redirect(url_for('join'))
elif request.form['password'] == "":
return redirect(url_for('join'))
hashword = generate_password_hash(request.form['password'])
new_member = Member(request.form['email'],
name, hashword)
ses.add(new_member)
ses.commit()
return render_template('login.html')
finally:
ses.close()
return redirect(url_for('join'))
else:
return render_template('join.html')
# -------------------- Member End ---------------------------------------
# -------------------- FreeBoard Start ----------------------------------
@app.route('/free', methods=["post", "get"])
def free_board():
Session = sessionmaker(bind=engine)
ses = Session()
if request.method == "POST":
title = request.form['title']
content = request.form['content']
email = request.form['email']
time1 = time.strftime('%Y-%m-%d %H:%M:%S')
free = Freeboard(title, content, time1, email)
ses.add(free)
ses.commit()
ses.close()
return redirect(url_for("free_board"))
else:
frees = ses.query(Freeboard, Member.name).filter(Freeboard.email == Member.email).order_by(
desc(Freeboard.reg_date)).all()
ses.close()
return render_template('free_board.html', frees=frees)
# -------------------- FreeBoard End ------------------------------------
| null |
share/application.py
|
application.py
|
py
| 19,186 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.g.user_name",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.g.user_email",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.g.user_email",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "flask.g.user_name",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "share.app.before_request",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "share.app",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.email",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user_email",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.email",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user_email",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "share.models.Article.groupno",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 75,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 76,
"usage_type": "argument"
},
{
"api_name": "share.models.Article.groupno",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 77,
"usage_type": "argument"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 87,
"usage_type": "argument"
},
{
"api_name": "share.models.Article.groupno",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.email",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user_email",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 117,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.name",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup.email",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member.email",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 123,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.name",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup.email",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member.email",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup.name.like",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "share.models.Sharegroup.name",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 155,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.name",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "flask.g.user_email",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup.name",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "flask.g.user_email",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "groupInfo.GroupInfo",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "share.models.Article",
"line_number": 192,
"usage_type": "argument"
},
{
"api_name": "share.models.Article.groupno",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 205,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "groupInfo.GroupInfo",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 221,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.name",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "share.app.route",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.email",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "flask.g.user_email",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "groupInfo.GroupInfo",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.desc",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "flask.request.form.getlist",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "share.models.Article",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.desc",
"line_number": 266,
"usage_type": "argument"
},
{
"api_name": "time.strftime",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "share.models.Article",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.desc",
"line_number": 270,
"usage_type": "argument"
},
{
"api_name": "time.strftime",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "share.models.Article",
"line_number": 284,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.name",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "share.models.Article.groupno",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.desc",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "share.models.Article.id",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "share.models.Article",
"line_number": 295,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.name",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "share.models.Article.id",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article.email",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "share.models.Member.email",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "share.models.Member.name",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "share.models.Comment.comment",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "share.models.Comment",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "share.models.Comment.reg_date",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "share.models.Comment.email",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "share.models.Comment.id",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "share.models.Comment.article_id",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "share.models.Comment",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "share.models.Member.email",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "share.models.Comment.email",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member.name",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant.id",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant.success",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article.id",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant.email",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "share.models.Member.email",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant.article_id",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article.id",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "share.app.route",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant",
"line_number": 310,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.article_id",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 313,
"usage_type": "argument"
},
{
"api_name": "share.models.Article.id",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant",
"line_number": 328,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.id",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant",
"line_number": 331,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.article_id",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "share.app.route",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant",
"line_number": 351,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.id",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "flask.request.referrer",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "share.app.route",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant",
"line_number": 363,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.id",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "flask.request.referrer",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "share.app.route",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "share.models.Applicant",
"line_number": 379,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.email",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant.article_id",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "share.models.Applicant",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "share.models.Comment",
"line_number": 394,
"usage_type": "argument"
},
{
"api_name": "share.models.Comment.id",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "flask.request.referrer",
"line_number": 398,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "share.app.route",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "share.models.Comment",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 425,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 432,
"usage_type": "name"
},
{
"api_name": "share.models.Member",
"line_number": 434,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.email",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 435,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.name",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "share.models.Article.groupno",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article.email",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist.group_no",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "share.models.Groupmemberlist",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "share.models.Groupmemberlist.email",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "share.models.Article",
"line_number": 437,
"usage_type": "argument"
},
{
"api_name": "share.models.Applicant.success",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "share.models.Article.id",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant.article_id",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "share.models.Applicant.email",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "share.models.Sharegroup",
"line_number": 441,
"usage_type": "argument"
},
{
"api_name": "share.models.Sharegroup.no",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "share.models.Member",
"line_number": 459,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.email",
"line_number": 459,
"usage_type": "attribute"
},
{
"api_name": "flask.redirect",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "werkzeug.security.check_password_hash",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "flask.session.pop",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "flask.session.pop",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 487,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 491,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "share.models.Member",
"line_number": 493,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.email",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 493,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 496,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 498,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "werkzeug.security.generate_password_hash",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 504,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 504,
"usage_type": "name"
},
{
"api_name": "share.models.Member",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "share.models.engine",
"line_number": 524,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 526,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 526,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 527,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 528,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "share.models.Freeboard",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "share.models.Freeboard",
"line_number": 537,
"usage_type": "argument"
},
{
"api_name": "share.models.Member.name",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "share.models.Freeboard.email",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "share.models.Member.email",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.desc",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "share.models.Freeboard.reg_date",
"line_number": 538,
"usage_type": "attribute"
},
{
"api_name": "share.models.Freeboard",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "share.app.route",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "share.app",
"line_number": 522,
"usage_type": "name"
}
] |
177159009
|
import transakcije_io
import datetime
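# Helpers for recording transactions and searching them by date range, and optionally by an
# extra key/value, on top of the transakcije_io persistence module.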
def napraviTransakciju(sifra, kolicina, smer_transakcije, trenutni_korisnik):
transakcije_io.upisiTransakciju(sifra, kolicina, smer_transakcije, trenutni_korisnik)
# takes a start and end date (string in 'dd.mm.yyyy.' format),
# an optional key ('magacioner' or 'sifra')
# and a value for additional filtering by that key
def pretragaTransakcija(datum1, datum2, kljuc=None, vrednost=None):
transakcije = transakcije_io.ucitajTransakcije()
pronadjene = []
for t in transakcije:
if proveriDatum(datum1, t['datum'], datum2):
pronadjene.append(t)
if kljuc is not None:
filtrirane = []
for i, u in enumerate(pronadjene):
if vrednost in u[kljuc]:
filtrirane.append(pronadjene[i])
return filtrirane
return pronadjene
# takes a start and end date (string in 'dd.mm.yyyy.' format)
# and a date that should fall within that interval
def proveriDatum(datum1, datum, datum2):
datum1 = datetime.datetime.strptime(datum1, "%d.%m.%Y.")
datum2 = datetime.datetime.strptime(datum2, "%d.%m.%Y.")
datum = datetime.datetime.strptime(datum, "%d.%m.%Y.")
if datum >= datum1 and datum <= datum2:
return True
return False
| null |
transakcije.py
|
transakcije.py
|
py
| 1,276 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "transakcije_io.upisiTransakciju",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "transakcije_io.ucitajTransakcije",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
}
] |
644259544
|
import os
import hashlib, uuid
SALT = uuid.uuid4().hex
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
WTF_CSRF_ENABLED = True
SECRET_KEY = 'Alumiboti'
POSTS_PER_PAGE = 5
| null |
config.py
|
config.py
|
py
| 316 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "uuid.uuid4",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
}
] |
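A sketch of how a Flask application might consume the config.py module from the row above, assuming the conventional Flask / Flask-SQLAlchemy setup; the `app` wiring itself is not part of the row and is an assumption:

# Hypothetical application wiring for the config.py shown above.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config.from_object('config')  # loads SQLALCHEMY_DATABASE_URI, SECRET_KEY, WTF_CSRF_ENABLED, ...
db = SQLAlchemy(app)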
126045875
|
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import netCDF4 as nc
from pylab import *
import subprocess
from subprocess import call
d=nc.Dataset('DATA/Geostropic_Adjustment.nc')
lons=d.variables['longs_v'][:] # only need this as rho is defined at v points
levs=d.variables['half_level'][:]
times=d.variables['time'][:]
u=d.variables['u'][:,:,:]
v=d.variables['v'][:,:,:]
p=d.variables['rho_prime'][:,:,:]
p=p*100
#levels=np.arange(11, dtype=float)
#levels=np.arange(-10,12,2, dtype=float)
levels=[-1.0,-0.75,-0.5,-0.25,-0.2,-0.15,-0.1,-0.05,-0.01,0.01,0.05,0.1,0.15,0.2,0.25,0.5,0.75,1.0]
#cmap=cm.Greys
cmap=cm.bwr
init=np.where(times==0.0)[0][0]
half=np.where(times==1800.0)[0][0]
one=np.where(times==3600.0)[0][0]
two=np.where(times==7200.0)[0][0]
three=np.where(times==10800.0)[0][0]
levs=levs*0.001
lons=lons*0.001
X,Y = meshgrid(lons,levs)
# ARRAY SHAPES ARE 0=time, 1=levels, 2=longitudes; e.g. (31, 60, 360)
# make subroutine for plotting subplots (initial time is done explicitly)
def pltsubplot(X,Y,p,u,v,levels,lons,levs, time, title):
plt.contourf(X,Y,p[time,:,:], levels, cmap=cm.get_cmap(cmap, len(levels)-1) )
# plt.contour(X,Y,p[time,:,:])#, colors='k' )
plt.axis([min(lons), max(lons), min(levs), max(levs)])
Q=quiver(X[::3,::30],Y[::3,::30], u[time,::3,::30], v[time,::3,::30], color='k', scale=40)
if time == half:
quiverkey(Q, 0.9, 1.1, 4, '10 ms$^{-1}$', labelpos='E',)
plt.title(title)
# set up plot
plt.clf()  # clf must be called (not just referenced) to actually clear the current figure
plt.figure()
# make initial plot with additional info
fg1 = plt.subplot2grid((6,4), (0, 1), colspan=2, rowspan=2)
geo=plt.contourf(X,Y,p[init,:,:], levels, cmap=cm.get_cmap(cmap, len(levels)-1) )
#plt.contour(X,Y,p[init,:,:])#, colors='k' )
plt.axis([min(lons), max(lons), min(levs), max(levs)])
plt.title("Initial perturbation")
plt.xlabel('Longitudinal distance (km)')
plt.ylabel('Height (km)')
cb=plt.colorbar(geo,orientation='vertical', shrink=0.9,anchor=(2.25, 0.50), pad=-0.25)
#cb.ax.set_yticklabels(['-1.0', '0', '1.0'])
# time slices using the subroutine pltsubplot
fg2 = plt.subplot2grid((6,4), (2, 0), colspan=2, rowspan=2)
pltsubplot(X,Y,p,u,v,levels,lons,levs,half,"Time = Half hour")
fg3 = plt.subplot2grid((6,4), (2, 2), colspan=2, rowspan=2)
pltsubplot(X,Y,p,u,v,levels,lons,levs,one,"Time = One hour")
fg4 = plt.subplot2grid((6,4), (4, 0), colspan=2, rowspan=2)
pltsubplot(X,Y,p,u,v,levels,lons,levs,two,"Time = Two hours")
fg5 = plt.subplot2grid((6,4), (4,2), colspan=2, rowspan=2)
pltsubplot(X,Y,p,u,v,levels,lons,levs,three,"Time = Three hours")
plt.tight_layout()
ofile="geostrophic_adj.eps"
# WRITE FIGURE TO FILE
#=========================
plt.savefig(ofile, format='eps', dpi=1000)
# DISPLAY FIGURE
#=========================
call("display "+ofile+"&", shell=True)
#plt.plot(xax_hoz, c_gr_h_ref[4,:], 'k', label='C$_{gr}$')
#plt.plot(xax_hoz, c_ac_h_ref[4,:], 'k--', label='C$_{ac}$')
#plt.title('Wavespeeds as a function of horizontal wavenumber')
#plt.legend(loc='best')
#plt.axis('tight')
#plt.ylabel('Wave speed ms$^{-1}$')
#plt.xlabel('Wave number')
#plt.show()
#plt.close()
| null |
ABC_MODEL/MATLAB_FILES_AND_DATA/plt_geost_adj.py
|
plt_geost_adj.py
|
py
| 3,121 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "netCDF4.Dataset",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot2grid",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot2grid",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot2grid",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot2grid",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot2grid",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "subprocess.call",
"line_number": 89,
"usage_type": "call"
}
] |
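The plotting script in the row above selects time slices with exact float equality (np.where(times == 3600.0)); below is a small sketch of a tolerance-based lookup that is more robust when time values come from a netCDF file, assuming the same 1-D `times` array:

# Hypothetical nearest-time lookup as an alternative to exact float comparisons.
import numpy as np

def nearest_time_index(times, target_seconds):
    """Return the index of the entry in `times` closest to target_seconds."""
    return int(np.argmin(np.abs(np.asarray(times) - target_seconds)))

# e.g. one = nearest_time_index(times, 3600.0) instead of np.where(times == 3600.0)[0][0]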