seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
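Each row below stores one source file in the `text` cell (`null` marks missing `repo_name`/`stars` values), and the `api` cell is a JSON list of detected API call sites. A minimal sketch of consuming an `api` cell, using the first record of the first row below (variable names are my own):

```python
import json

# `api` cell contents, abbreviated from the first row of this table
api_cell = '[{"api_name": "itertools.permutations", "line_number": 36, "usage_type": "call"}]'
for rec in json.loads(api_cell):
    print(rec["api_name"], rec["line_number"], rec["usage_type"])
```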
250555844
|
# Good morning! Here's your coding interview problem for today.
# This problem was asked by IBM.
# Given an integer, find the next permutation of it in absolute order.
# For example, given 48975, the next permutation would be 49578.
from itertools import permutations

def one_digit_checker(num):
    # True if num is a single non-negative digit
    return 0 <= num < 10

def digit_separator(number):
    # Split a non-negative integer into its digits (least significant first)
    digits_list = []
    if one_digit_checker(number):
        digits_list.append(number)
    else:
        temp_number = number
        while not one_digit_checker(temp_number):
            last_digit = temp_number % 10
            digits_list.append(last_digit)
            temp_number = (temp_number - last_digit) // 10
        digits_list.append(temp_number)
    return digits_list

def next_permutation_finder(my_number):
    # Brute force: evaluate every permutation of the digits and keep the
    # smallest value that is strictly larger than my_number
    my_digit_list = digit_separator(my_number)
    perm_num_list = []
    for one_perm in permutations(my_digit_list):
        temp_sum = 0
        for k in range(len(one_perm)):
            # a zero digit simply contributes nothing to the value
            temp_sum += one_perm[k] * 10**(len(one_perm) - (k + 1))
        perm_num_list.append(temp_sum)
    differences = [p - my_number for p in perm_num_list if p > my_number]
    if not differences:
        return None  # my_number is already the largest permutation of its digits
    return my_number + min(differences)

my_number = 48975
print(next_permutation_finder(my_number))
| null |
Daily Coding Problem/Solved/Problem#205_Easy.py
|
Problem#205_Easy.py
|
py
| 1,744 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "itertools.permutations",
"line_number": 36,
"usage_type": "call"
}
] |
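The brute-force search above costs O(n! · n). For comparison, a minimal sketch of the standard linear-time next-permutation algorithm over the digit list (the function below is illustrative, not part of the dataset row):

```python
# Classic O(n) next-permutation on a digit list; returns None at the last permutation.
def next_permutation(digits):
    d = list(digits)
    i = len(d) - 2
    while i >= 0 and d[i] >= d[i + 1]:   # find the rightmost ascent
        i -= 1
    if i < 0:
        return None                      # digits are already in descending order
    j = len(d) - 1
    while d[j] <= d[i]:                  # smallest digit right of i that is larger
        j -= 1
    d[i], d[j] = d[j], d[i]
    d[i + 1:] = reversed(d[i + 1:])      # make the suffix minimal
    return int(''.join(map(str, d)))

print(next_permutation([4, 8, 9, 7, 5]))  # 49578
```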
605340624
|
from visdom import Visdom
import time

class VisdomLinePlotter(object):
    def __init__(self, vis, color='red', size=5, title=None, ylabel=None, xlabel=None, linelabel=None):
        self.vis = vis
        self.title = title
        self.ylabel = ylabel
        self.xlabel = xlabel
        # this holds the data to be plotted
        self.trace = [dict(x=[], y=[], mode='markers+lines', type='custom',
                           marker={'color': color, 'size': size}, name=linelabel)]
        # this holds the layout of the plot
        self.layout = dict(title=self.title, xaxis={'title': self.xlabel}, yaxis={'title': self.ylabel},
                           showlegend=True)

    def add_new(self, color, size=5, linelabel=None):
        # add new line
        self.trace.append(dict(x=[], y=[], mode='markers+lines', type='custom',
                               marker={'color': color, 'size': size}, name=linelabel))

    def update(self, new_x, new_y):
        for i, tr in enumerate(self.trace):
            tr['x'].append(new_x)
            tr['y'].append(new_y[i])
        self.vis._send({'data': self.trace, 'layout': self.layout, 'win': self.title})

###############################################################################################################

def main():
    PORT = 7777
    vis = Visdom(port=PORT)
    # check if Visdom server is available
    if vis.check_connection():
        print('Visdom server is online - will log data')
    else:
        print('Visdom server is offline - will not log data')
    test = VisdomLinePlotter(vis, color='orange', title='testing', ylabel='accuracy', xlabel='epochs', linelabel='CNN+MLP')
    test.add_new(color='blue', linelabel='CNN+RN')
    for i in range(20):
        test.update(i, [2*i, 3*i])
        time.sleep(0.5)

if __name__ == '__main__':
    main()
| null |
visdom_utils.py
|
visdom_utils.py
|
py
| 1,633 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "visdom.Visdom",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 51,
"usage_type": "call"
}
] |
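The plotter above drives visdom through the private `Visdom._send()` hook. A minimal sketch of the same incremental line plotting via the public `Visdom.line()` API (the window title is arbitrary, and a server is assumed on the default port):

```python
from visdom import Visdom
import numpy as np

vis = Visdom()
win = None
for i in range(20):
    X, Y = np.array([i]), np.array([2 * i])
    if win is None:
        win = vis.line(X=X, Y=Y, opts=dict(title='testing'))
    else:
        vis.line(X=X, Y=Y, win=win, update='append')  # append to the existing window
```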
477083590
|
import nltk
import torch
import re
from rupo.g2p import graphemes

gr = graphemes.Graphemes()

def repackage_hidden(h):
    """Wraps hidden states in new Tensors,
    to detach them from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    else:
        return tuple(repackage_hidden(v) for v in h)

def batchify(data, bsz, args):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    if args.cuda:
        data = data.cuda()
    return data

def get_batch(source, i, args, seq_len=None, evaluation=False):
    seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].view(-1)
    return data, target

def tokenize(line):
    tokens = []
    for token in nltk.tokenize.wordpunct_tokenize(line.lower()):
        if re.match('^[А-яЁё]+$', token):
            s = gr.get_syllables(token)
            res = [token] if len(s) == 0 else [getattr(x, 'text', x) for x in s]
            res = res + ['<eow>']
        else:
            res = list(token) + ['<eow>']
        tokens += res
    return tokens

def tokenize_with_eos(text):
    tokens = []
    lines = text.split('\n')
    for line in lines:
        tokens += tokenize(line) + ['<eos>']
    return tokens

if __name__ == '__main__':
    import pymysql
    db = pymysql.connect(host="194.63.158.122",  # your host, usually localhost
                         user="sa",              # your username
                         passwd="alfa377901",    # your password
                         db="RK72012MIMI", port=5034)  # name of the data base
| null |
utils.py
|
utils.py
|
py
| 1,878 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rupo.g2p.graphemes.Graphemes",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rupo.g2p.graphemes",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "nltk.tokenize.wordpunct_tokenize",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 62,
"usage_type": "call"
}
] |
512786865
|
import json
import hashlib
import os
import re
import logging
import boto3
from botocore.vendored import requests
from botocore.exceptions import ClientError

# get variables from env
alien_apikey = os.environ['ALIEN_API_KEY']
alien_url = os.environ['ALIEN_URL']
confidence = os.environ['CONFIDENCE']
action_type = os.environ['ACTION']
hook_url = os.environ['HOOK_URL']
slack_channel = os.environ['SLACK_CHANNEL']

# define logger
log_level = os.environ['LOG_LEVEL'].upper()
logger = logging.getLogger()
logger.setLevel(log_level)

detected_pattern = re.compile(r"'alerts': \['Malware infection'\]")

# create s3 high-level session
s3 = boto3.resource('s3')

# Do the action we pre-defined
def action(detect_count, bucket_name, object_name):
    if int(detect_count) > int(confidence) and action_type == 'DETECTION':
        slack_message = {
            'channel': slack_channel,
            'text': "Detect suspicious file '%s' on '%s' bucket, please check!" % (object_name, bucket_name)
        }
        response = requests.post(hook_url, data=json.dumps(slack_message), headers={'Content-Type': 'application/json'})
        if response.status_code != 200:
            logger.error('Error code is: %s and the response is:\n%s' % (response.status_code, response.text))
        logger.info('push to slack')
    elif int(detect_count) > int(confidence) and action_type == 'PREVENTION':
        obj = s3.Object(bucket_name, object_name)
        obj.delete()
        logger.warning('Delete S3 object: ' + object_name)

# calculate SHA-1 hash
def hash_calculator(b_object):
    hash = hashlib.sha1(b_object).hexdigest()
    logger.info('SHA-1: ' + hash)
    return hash

def lambda_handler(event, context):
    for data in event['Records']:
        bucket_name = data['s3']['bucket']['name']
        object_name = data['s3']['object']['key']
        # get object from S3
        obj = s3.Object(bucket_name, object_name)
        buf = obj.get()['Body'].read()
        # calculate SHA-1 hash
        h = hash_calculator(buf)
        file_url = alien_url + h + '/analysis'
        headers = {'X-OTX-API-KEY': alien_apikey}
        rsp = requests.get(file_url, headers=headers)
        if rsp.status_code != 200:
            logger.error('Error code is: %s and the response is:\n%s' % (rsp.status_code, rsp.text))
        rsp_json = rsp.text
        rsp_dict = json.loads(rsp_json)
        detect_count = 0
        if rsp_dict['analysis'] is not None:
            for data in rsp_dict['analysis']['plugins']:
                search = detected_pattern.search(str(rsp_dict['analysis']['plugins'][data]))
                if search:
                    detect_count += 1
                    logger.info('Malware detect by: ' + str(data))
        if detect_count > 0:
            action(detect_count, bucket_name, object_name)
    return {
        "statusCode": 200,
        "body": json.dumps('Scan complete!')
    }
| null |
alienvault_s3.py
|
alienvault_s3.py
|
py
| 2,975 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "botocore.vendored.requests.post",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "botocore.vendored.requests",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "botocore.vendored.requests.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "botocore.vendored.requests",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 85,
"usage_type": "call"
}
] |
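A minimal sketch of exercising `lambda_handler()` locally with a fake S3 event; the bucket and key names are made up, and the environment variables the module reads (ALIEN_API_KEY, ALIEN_URL, CONFIDENCE, ACTION, HOOK_URL, SLACK_CHANNEL, LOG_LEVEL) must be set before the module is imported:

```python
event = {
    "Records": [
        {"s3": {"bucket": {"name": "example-bucket"},
                "object": {"key": "suspicious.bin"}}}
    ]
}
print(lambda_handler(event, context=None))
```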
3140707
|
"""Module for solving Equation 6 of Canto, Raga, & Wilkin 1996 (CRW96)
This finds the radius of a stellar wind interaction bowshock in terms
of the momentum and angular momentum injected by the two winds.
CRW96 concentrated on the case of isotropic winds, but this module
will work generally with any cylindrically symmetric wind
All positions and distances are in units of D, the interstar
separation
The philosophy of this module is to be as straightforward and
transparent an implementation as possible of the equations in the
paper. There is no attempt to be efficient.
"""
from __future__ import print_function
import numpy as np
import scipy.integrate
import scipy.optimize
from scipy.special import gamma as gamma_func
DEBUG_LEVEL = 0
###
### Public functions
###
def isotropic_momentum(theta):
"""Momentum as a function of angle for an isotropic wind"""
return 1.0
DIFFUSE_BETA = 0.0 # Parameter giving relative strength of diffuse field
def proplyd_momentum(theta):
"""Momentum as a function of angle for a proplyd wind
Proportional to sqrt(cos(theta)) in the head (theta < pi/2), and
then a small constant value (zero by default) in the tail (theta >
pi/2). The tail value is set via the module-level variable
DIFFUSE_BETA.
"""
return DIFFUSE_BETA + (1.0 - DIFFUSE_BETA)*np.sqrt(max(0.0,np.cos(theta)))
global MOMENTUM_K
MOMENTUM_K = 1.0
def anisotropic_momentum(theta):
"""Momentum as a power law in cos(theta) on the forward hemisphere only"""
if theta <= np.pi/2:
return np.cos(theta)**MOMENTUM_K
else:
return 0.0
###
### Public classes
###
class Wind(object):
"""Class to represent a stellar wind (or proplyd, etc)
axial_momentum_flux is the momentum flux at theta=0 (or mu=1) in
arbitrary units
momentum_law is a function that describes how the momentum flux
per unit solid angle varies with theta
origin is a flag that is True if the center of this wind is at the
co-ordinate origin. If origin is False, then the center of the
wind is at unit distance from the origin along the z axis.
"""
def __init__(self, axial_momentum_flux=1.0, momentum_law=isotropic_momentum, origin=True):
self.axial_momentum_flux = axial_momentum_flux
self.momentum_law = momentum_law
self.origin = origin
def Jdot(self, theta):
"""Angular momentum injection rate about the origin,
integrated between axis and theta
"""
if self.origin:
return 0.0
else:
if self.momentum_law == isotropic_momentum:
return 0.25*self.axial_momentum_flux*(theta - np.sin(theta)*np.cos(theta))
else:
# I haven't implemented the numerical integration yet
# in this case, but hopefully we will not need it
raise NotImplementedError
def Pidot_z(self, theta):
"""Linear z-momentum injection rate, integrated between axis
and theta
"""
if self.momentum_law == isotropic_momentum:
# Analytic solution for isotropic case
Pdz = 0.25*np.sin(theta)**2
else:
# Numerical integration for the general case
Pdz, err = scipy.integrate.quad(self._integrand_Pdz, 0.0, theta)
if self.origin:
return Pdz*self.axial_momentum_flux
else:
# The second star has oppositely directed axial momentum
return -Pdz*self.axial_momentum_flux
def Pidot_r(self, theta):
"""Linear r-momentum injection rate, integrated between axis
and theta
"""
if self.momentum_law == isotropic_momentum:
# Analytic solution for isotropic case
Pdr = 0.25*(theta - np.sin(theta)*np.cos(theta))
else:
# Numerical integration for the general case
Pdr, err = scipy.integrate.quad(self._integrand_Pdr, 0.0, theta)
return Pdr*self.axial_momentum_flux
def _integrand_Pdz(self, t):
return 0.5*np.cos(t)*self.momentum_law(t)*np.sin(t)
def _integrand_Pdr(self, t):
return 0.5*np.sin(t)*self.momentum_law(t)*np.sin(t)
class BaseShell(object):
"""Class to represent a two-wind interaction shell"""
def __init__(self, w, w1):
"""The arguments w and w1 should be instances of the class
Wind()
The inner wind, w, should have origin=True, while the outer
wind, w1, should have origin=False
See the Shell() class for an easier to use wrapper around this
class
"""
self.w = w # "inner" wind
self.w1 = w1 # "outer" wind
# We save the values of theta and theta1, so we can use them
# to find an initial estimate of theta1 for the next angle
# theta
self.th1_save = None
self.th_save = None
# Pre-calculate the on-axis radius of the shell
self.beta = self.w.axial_momentum_flux / self.w1.axial_momentum_flux
self.R0 = np.sqrt(self.beta)/(1.0 + np.sqrt(self.beta))
def radius(self, theta, method='brent', full=False):
"""Find the spherical radius of the shell as a function of angle
Should work with scalar or vector argument `theta`. Returns
`radius`, but if positional argument `full` is `True`, then
return tuple: `radius`, `theta1`
"""
def _radius(theta):
"""Helper function to find the shell radius for a single angle, theta"""
if theta == 0.0:
# special treatment for the axis
return self.R0
elif theta >= self.th_infty:
# Indicate that we have gone too far
return -1.0
else:
if method == 'fsolve':
if self.th1_save is None:
# For the first off-axis angle, we use the fact
# that R0 tan(theta) ~= (1 - R0) tan(theta1) for
# small theta
th1_guess = theta*self.R0 / (1.0 - self.R0)
else:
# For subsequent angles, we do geometric extrapolation
th1_guess = self.th1_save*theta/self.th_save
# The tricky bit here is getting th1_guess to be close
# enough to the true solution. If it is not, then the
# solver will fail
theta1 = _solve_for_th1(self.w, self.w1, theta,
th1_guess, method=method)
else:
# Assume other methods require root to be bracketed
# Must be between 0 and th1_infty
if self.th1_save is None:
a, b = 1e-10, self.th1_infty
else:
a, b = self.th1_save, self.th1_infty
theta1 = _solve_for_th1(self.w, self.w1, theta,
bounds=[a, b], method=method)
if DEBUG_LEVEL > 0:
print('+++', self.th_infty - theta, self.th1_infty - theta1)
self.th_save = theta
self.th1_save = theta1
return _radius_eq23(theta, theta1)
try:
# case where theta is iterable
rslt = np.empty_like(theta)
th1_rslt = np.empty_like(theta)
for i, t in enumerate(theta):
r = _radius(t)
if r > 0.0:
rslt[i] = r
th1_rslt[i] = self.th1_save
else:
# assume we have got to th_max
# so fill the remainder with NaNs
rslt[i:] = np.nan
th1_rslt[i:] = np.nan
break
if full:
return rslt, th1_rslt
else:
return rslt
except TypeError:
# fall-over case where theta is scalar
if full:
return _radius(theta), self.th1_save
else:
return _radius(theta)
class Shell(BaseShell):
"""Easy-to-use class to represent a two-wind interaction shell"""
def __init__(self, beta=1.0, innertype="isotropic", outertype="isotropic", xi=None):
"""Parameters:
beta: axial momentum flux ratio (inner/outer)
innertype: either 'proplyd' or 'isotropic'
outertype: must be 'isotropic'
"""
global MOMENTUM_K
if innertype == "anisotropic":
mlaw = anisotropic_momentum
# xi = 2 / (k + 2) => k = (2/xi) - 2
MOMENTUM_K = 2.0/xi - 2.0
elif innertype == "proplyd":
mlaw = proplyd_momentum
elif innertype == "isotropic":
mlaw = isotropic_momentum
else:
raise NotImplementedError("Inner wind must be isotropic or proplyd")
if not outertype == "isotropic":
raise NotImplementedError("Outer wind must be isotropic for now")
w = Wind(axial_momentum_flux=beta, momentum_law=mlaw, origin=True)
w1 = Wind(origin=False)
# Initialise the base class
BaseShell.__init__(self, w, w1)
# Asymptotic angles for far wing/tail
a, b = 1.0001*np.pi/2.0, np.pi
if innertype == 'anisotropic':
self.th_infty = scipy.optimize.brentq(_finf, a, b, args=(beta, xi))
elif innertype == 'isotropic':
self.th_infty = scipy.optimize.brentq(_finfCRW, a, b, args=(beta,))
elif innertype == 'proplyd':
self.th_infty = scipy.optimize.brentq(_finf, a, b, args=(beta, 0.8))
self.th1_infty = np.pi - self.th_infty
###
### Private functions - these are implementation details that should
### not be accesible from outside
###
def _finf(th, beta, xi):
"""Function that gives f(theta) = 0 when theta = theta_infty
Version for hemispheric flow with anisotropy xi
"""
k = 2./xi-2
C = (k+2*(1-beta))/(k+2)
I = np.sqrt(np.pi)*gamma_func(0.5*(k+1))/(4*gamma_func(0.5*k+2))
D = np.pi + 2*beta*I
return th - C*np.tan(th) - D
def _finfCRW(th, beta):
"""Function that gives f(theta) = 0 when theta = theta_infty
Version for spherically symmetric flow, as in CRW
"""
return th - np.tan(th) - np.pi/(1.0 -beta)
def _radius_eq6(w, w1, th, th1):
"""Literal implementation of CRW96 Eq 6 for two winds w, w1
Returns the radius for a given pair of angles th and th1 in terms
of the momentum rates injected by the two winds
"""
numerator = w.Jdot(th) + w1.Jdot(th1)
denominator = (w.Pidot_r(th) + w1.Pidot_r(th1))*np.cos(th) \
- (w.Pidot_z(th) + w1.Pidot_z(th1))*np.sin(th)
return numerator/denominator
def _radius_eq23(th, th1):
"""
Literal implementation of CRW Eq 23
Gives the radius in terms of the two angles th and th1
"""
return np.sin(th1)/np.sin(th+th1)
def _solve_for_th1(w, w1, th, th1_estimate=None, bounds=None, method='brent'):
"""For two winds (w and w1) and an angle (th) wrt the origin of w,
find the angle th1 wrt the origin of w1
It is necessary to give an initial estimate (th1_estimate) for
th1.
Note that the internal function call is very expensive, since it
potentially includes numerical integrations. But who cares,
right?!
"""
def _f(th1, w, w1, th):
"""This should be zero when we have the correct th1"""
return _radius_eq6(w, w1, th, th1) - _radius_eq23(th, th1)
if method == 'fsolve':
# This was the original method
assert th1_estimate is not None, 'fsolve method needs guess for th1'
th1, = scipy.optimize.fsolve(_f, th1_estimate, args=(w, w1, th))
elif method == 'brent':
assert bounds is not None and len(bounds) >= 2, 'Bounds must be 2-sequence'
fa = _f(bounds[0], w, w1, th)
fb = _f(bounds[1], w, w1, th)
if DEBUG_LEVEL > 0:
print(bounds, [fa, fb])
if fa*fb < 0.0:
# Hurray, we have bracketed the root
a, b = bounds
else:
# Look for a change of sign in the middle somewhere
xgrid = np.linspace(bounds[0], bounds[1], 2000)
# Make sure it has the opposite sign from upper bound
fgrid = -fb*np.array([_f(x, w, w1, th) for x in xgrid])
if not np.any(fgrid > 0.0):
# No roots at all - this is bad... bail out
return np.nan
i0 = np.argmax(fgrid)
a, b = xgrid[i0], bounds[1]
if DEBUG_LEVEL > 0:
print(xgrid[i0], fgrid[i0])
th1 = scipy.optimize.brentq(_f, a=a, b=b, args=(w, w1, th))
else:
raise NotImplementedError
return th1
###
### What happens if we run the module as a script
###
if __name__ == "__main__":
# Define a shell between two equal and isotropic winds
shell = Shell(beta=1.0)
# Define an array of angles
theta = np.linspace(0.0, np.pi)
# Calculate the shell radius for each angle
R = shell.radius(theta)
# Print the z coordinate of the shell: R cos(theta)
print(R*np.cos(theta)) # These should all be 0.5
# Now do the same for the proplyd case
shell = Shell(beta=1.0, innertype="proplyd")
R = shell.radius(theta)
print(R*np.cos(theta))
| null |
Programs/equation6.py
|
equation6.py
|
py
| 13,580 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate.quad",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "numpy.sin",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate.quad",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "numpy.cos",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.empty_like",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.empty_like",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate.optimize.brentq",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "scipy.integrate.optimize.brentq",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "scipy.integrate.optimize.brentq",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "scipy.special.gamma",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "numpy.tan",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "numpy.tan",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize.fsolve",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize.brentq",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.optimize",
"line_number": 359,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 386,
"usage_type": "call"
}
] |
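A short usage sketch for the module above (assuming it is importable as `equation6`); the beta and xi values are arbitrary illustrations:

```python
import numpy as np
import equation6

# Anisotropic inner wind: xi = 0.8 maps to MOMENTUM_K = 2/xi - 2 = 0.5
shell = equation6.Shell(beta=0.2, innertype="anisotropic", xi=0.8)
theta = np.linspace(0.0, 0.9 * shell.th_infty, 200)
R = shell.radius(theta)
print("On-axis radius R0 =", shell.R0)  # sqrt(beta)/(1 + sqrt(beta)) ~ 0.309
```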
643155825
|
import gym
import matplotlib
import numpy as np

env = gym.make("MountainCar-v0")

DISCRETE_OS_SIZE = [20] * len(env.observation_space.high)
discrete_state_window_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 5000

epsilon = 0.5
START_EPSILON_VALUE = 1
END_EPSILON_VALUE = EPISODES // 2
epsilon_decay_value = epsilon / (END_EPSILON_VALUE - START_EPSILON_VALUE)

def get_discrete_state(state):
    discrete_state = (state - env.observation_space.low) / discrete_state_window_size
    return tuple(discrete_state.astype(int))

q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))

for episode in range(EPISODES):
    if True:  # render every episode
        print(episode)
        render = True
    else:
        render = False
    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        # epsilon-greedy action selection
        if np.random.random() > epsilon:
            action = np.argmax(q_table[discrete_state])
        else:
            action = np.random.randint(0, env.action_space.n)
        new_state, reward, done, _ = env.step(action)
        if render:
            env.render()
        new_discrete_state = get_discrete_state(new_state)
        if not done:
            current_q = q_table[discrete_state + (action,)]
            future_q = np.max(q_table[new_discrete_state])
            updated_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * future_q)
            q_table[discrete_state + (action,)] = updated_q
        elif new_state[0] >= env.goal_position:
            print("we are done at", episode)
            q_table[discrete_state + (action,)] = 0
        discrete_state = new_discrete_state
    if END_EPSILON_VALUE >= episode >= START_EPSILON_VALUE:
        epsilon -= epsilon_decay_value

env.close()
| null |
MountainCar.py
|
MountainCar.py
|
py
| 1,795 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gym.make",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 41,
"usage_type": "call"
}
] |
57717416
|
""" Figures for the first SST OOD paper"""
import os, sys
import numpy as np
import glob
import matplotlib as mpl
import matplotlib.gridspec as gridspec
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.ticker as mticker
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
mpl.rcParams['font.family'] = 'stixgeneral'
import healpy as hp
import h5py
import pandas
import seaborn as sns
from ulmo.analysis import cc as ulmo_cc
from ulmo import plotting
from ulmo.utils import image_utils
from ulmo.utils import models as model_utils
from ulmo.utils import utils as utils
from ulmo import defs
from IPython import embed
# Local
sys.path.append(os.path.abspath("../Analysis/py"))
import results
extract_path = defs.extract_path
model_path = defs.model_path
eval_path = defs.eval_path
def fig_db_by_month(outfile):
    # Load db
    anom_db = pandas.read_hdf('../Analysis/MODIS_2010_100clear_48x48_log_probs.hdf')
    N10 = int(np.round(0.1*len(anom_db)))
    i10 = np.argsort(anom_db.log_likelihood.values)[0:N10]
    ih10 = np.argsort(anom_db.log_likelihood.values)[-N10:]
    # Months
    months = np.array([idate.month for idate in anom_db.date])
    # Bin em
    ibins = np.arange(14)
    H_all, bins = np.histogram(months, bins=ibins)
    bincentres = [(bins[i] + bins[i + 1]) / 2. for i in range(len(bins) - 1)]
    H_10, _ = np.histogram(months[i10], bins=ibins)    # Outliers
    H_h10, _ = np.histogram(months[ih10], bins=ibins)  # Inliers
    # Figure time
    fig = plt.figure(figsize=(7, 5))
    plt.clf()
    ax = plt.gca()
    for H, clr, cat in zip([H_all, H_10, H_h10], ['k', 'r', 'b'], ['All', 'Lowest 10%', 'Highest 10%']):
        plt.step(bincentres, H, where='mid', color=clr, label='{}'.format(cat))
    # Labels
    ax.set_ylabel(r'$N$')
    ax.set_xlabel('Month')
    #ax.set_yscale('log')
    ax.minorticks_on()
    legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,
                        handletextpad=0.3, fontsize='large', numpoints=1)
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))

def fig_db_by_meanT(outfile):
    # Load db
    anom_db = pandas.read_hdf('../Analysis/MODIS_2010_100clear_48x48_log_probs.hdf')
    N10 = int(np.round(0.1*len(anom_db)))
    i10 = np.argsort(anom_db.log_likelihood.values)[0:N10]
    ih10 = np.argsort(anom_db.log_likelihood.values)[-N10:]
    # Mean temperatures
    avgT = anom_db.mean_temperature.values
    # Bin em
    ibins = np.arange(0, 40, 5)
    H_all, bins = np.histogram(avgT, bins=ibins)
    bincentres = [(bins[i] + bins[i + 1]) / 2. for i in range(len(bins) - 1)]
    H_10, _ = np.histogram(avgT[i10], bins=ibins)    # Outliers
    H_h10, _ = np.histogram(avgT[ih10], bins=ibins)  # Inliers
    # Figure time
    fig = plt.figure(figsize=(7, 5))
    plt.clf()
    ax = plt.gca()
    for H, clr, cat in zip([H_all, H_10, H_h10], ['k', 'r', 'b'], ['All', 'Lowest 10%', 'Highest 10%']):
        plt.step(bincentres, H, where='mid', color=clr, label='{}'.format(cat))
    # Labels
    ax.set_ylabel(r'$N$')
    ax.set_xlabel(r'$<T>$ (C)')
    #ax.set_yscale('log')
    ax.minorticks_on()
    legend = plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
                        handletextpad=0.3, fontsize='large', numpoints=1)
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))

def fig_CC(outfile):
    """
    CC fraction
    """
    # Built by ulmo/analysis/cc.py
    tst_file = os.path.join(os.getenv('SST_OOD'), 'Analysis', 'cc_2010.h5')
    # Load data
    f = h5py.File(tst_file, mode='r')
    fracCC = f['fracCC'][:]
    # Average
    mean_fCC = np.mean(fracCC, axis=0)
    #embed(header='136 of figs')
    # Differential
    diff_CC = mean_fCC - np.roll(mean_fCC, -1)
    diff_CC[-1] = mean_fCC[-1]
    yzero = np.zeros_like(diff_CC)
    # Figure time
    fig = plt.figure(figsize=(7, 5))
    plt.clf()
    ax = plt.gca()
    # Plot
    p1 = ax.plot(1-ulmo_cc.CC_values, diff_CC, 'o', color='b', label='Fraction')
    #p1 = ax.fill_between(np.array(1-ulmo_cc.CC_values), yzero, diff_CC,
    #                     step='mid',
    #                     alpha=0.5,
    #                     color='blue',
    #                     label='Differential')
    # Labels
    ax.set_ylabel(r'Fraction of Total Images')
    ax.set_xlabel(r'Clear Fraction (CF=1-CC)')
    ax.set_ylim(0., 0.05)
    #ax.set_ylim(0., 1.0)
    # Font size
    fsz = 15.
    set_fontsize(ax, fsz)
    '''
    # Cumulative
    axC = ax.twinx()
    axC.set_ylim(0., 1.)
    p2 = axC.plot(1-ulmo_cc.CC_values[1:], mean_fCC[1:], color='k', label='Cumulative')
    axC.set_ylabel(r'Cumulative Distribution')
    set_fontsize(axC, fsz)
    #ax.set_yscale('log')
    #ax.minorticks_on()
    #plts = p1 + p2
    plts = p2
    labs = [p.get_label() for p in plts]
    legend = plt.legend(plts, labs, loc='upper right', scatterpoints=1, borderpad=0.3,
                        handletextpad=0.3, fontsize='large', numpoints=1)
    '''
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def img_example(iexample=4, cherry=False):
    prob_file = os.path.join(eval_path,
                             'MODIS_R2019_2010_95clear_128x128_preproc_std_log_probs.csv')
    table_files = [prob_file]
    # Find a good example
    print("Grabbing an example")
    df = results.load_log_prob('std', table_files=table_files)
    if cherry:
        bools = np.all([df.filename.values == 'AQUA_MODIS.20100619T062008.L2.SST.nc',
                        df.row.values == 253, df.column.values == 924], axis=0)
        icherry = np.where(bools)[0][0]
        # Replace
        example = df.iloc[icherry]
    else:
        cloudy = df.clear_fraction > 0.045
        df = df[cloudy]
        i_LL = np.argsort(df.log_likelihood.values)
        # One, pseudo-random
        example = df.iloc[i_LL[iexample]]
    return example

def fig_in_painting(outfile, iexample=4, vmnx=(8, 24)):
    """
    Parameters
    ----------
    outfile
    iexample
    vmnx

    Returns
    -------
    """
    example = img_example(iexample=iexample)
    # Grab it
    field, mask = image_utils.grab_img(example, 'Extracted', ptype='std')
    masked_field = field.copy()
    masked_field[mask == 1] = np.nan
    # Plot
    fig = plt.figure(figsize=(10, 4))
    pal, cm = plotting.load_palette()
    plt.clf()
    gs = gridspec.GridSpec(1, 2)
    # Before in-painting
    ax1 = plt.subplot(gs[0])
    sns.heatmap(masked_field, ax=ax1, xticklabels=[], yticklabels=[], cmap=cm,
                vmin=vmnx[0], vmax=vmnx[1])
    # After in-painting
    ax2 = plt.subplot(gs[1])
    sns.heatmap(field, ax=ax2, xticklabels=[], yticklabels=[], cmap=cm,
                vmin=vmnx[0], vmax=vmnx[1])
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def fig_spatial_all(pproc, outfile, nside=64):
    """
    Spatial distribution of the evaluations

    Parameters
    ----------
    pproc
    outfile
    nside

    Returns
    -------
    """
    # Load
    evals_tbl = results.load_log_prob(pproc, feather=True)
    lbl = 'evals'
    use_log = True
    use_mask = True
    # Healpix me
    hp_events, hp_lons, hp_lats = image_utils.evals_to_healpix(
        evals_tbl, nside, log=use_log, mask=use_mask)
    fig = plt.figure(figsize=(12, 8))
    plt.clf()
    hp.mollview(hp_events, min=0, max=4.,
                hold=True,
                cmap='Blues',
                flip='geo', title='', unit=r'$\log_{10} \, N_{\rm '+'{}'.format(lbl)+'}$',
                rot=(0., 180., 180.))
    #plt.gca().coastlines()
    # Layout and save
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))

def fig_spatial_outliers(pproc, outfile, nside=64):
    """
    Spatial distribution of the outliers (lowest 0.1% in LL)

    Parameters
    ----------
    pproc
    outfile
    nside

    Returns
    -------
    """
    # Load
    evals_tbl = results.load_log_prob(pproc, feather=True)
    cohort = 'outliers'
    point1 = int(0.001 * len(evals_tbl))
    isortLL = np.argsort(evals_tbl.log_likelihood)
    evals_tbl = evals_tbl.iloc[isortLL[0:point1]]
    lbl = 'outliers'
    use_mask = True
    use_log = True
    # Healpix me
    hp_events, hp_lons, hp_lats = image_utils.evals_to_healpix(
        evals_tbl, nside, log=use_log, mask=use_mask)
    fig = plt.figure(figsize=(12, 8))
    plt.clf()
    tformM = ccrs.Mollweide()
    tformP = ccrs.PlateCarree()
    ax = plt.axes(projection=tformM)
    if cohort == 'all':
        cm = plt.get_cmap('Blues')
        img = ax.tricontourf(hp_lons, hp_lats, hp_events, transform=tformM,
                             levels=20, cmap=cm)  #, zorder=10)
    else:
        cm = plt.get_cmap('Reds')
        # Cut
        good = np.invert(hp_events.mask)
        img = plt.scatter(x=hp_lons[good],
                          y=hp_lats[good],
                          c=hp_events[good],
                          cmap=cm,
                          s=1,
                          transform=tformP)
    # Colorbar
    cb = plt.colorbar(img, orientation='horizontal', pad=0.)
    clbl = r'$\log_{10} \, N_{\rm '+'{}'.format(lbl)+'}$'
    cb.set_label(clbl, fontsize=20.)
    cb.ax.tick_params(labelsize=17)
    # Coast lines
    if cohort == 'outliers':
        ax.coastlines(zorder=10)
        ax.set_global()
    if cohort != 'all':
        gl = ax.gridlines(crs=ccrs.PlateCarree(), linewidth=1,
                          color='black', alpha=0.5, linestyle=':', draw_labels=True)
        gl.xlabels_top = False
        gl.ylabels_left = True
        gl.ylabels_right = False
        gl.xlines = True
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl.xlabel_style = {'color': 'black'}  # 'weight': 'bold'}
        gl.ylabel_style = {'color': 'black'}  # 'weight': 'bold'}
        #gl.xlocator = mticker.FixedLocator([-180., -160, -140, -120, -60, -20.])
        #gl.xlocator = mticker.FixedLocator([-240., -180., -120, -65, -60, -55, 0, 60, 120.])
        #gl.ylocator = mticker.FixedLocator([0., 15., 30., 45, 60.])
    # Layout and save
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))

def fig_inlier_vs_outlier(outfile='fig_inlier_vs_outlier.png'):
    """
    Spatial distribution of inliers vs. outliers at fixed DT

    Parameters
    ----------
    outfile
    """
    tformP = ccrs.PlateCarree()
    # Load
    evals_tbl = results.load_log_prob('std', feather=True)
    # Add in DT
    if 'DT' not in evals_tbl.keys():
        evals_tbl['DT'] = evals_tbl.T90 - evals_tbl.T10
    # Cut on DT
    cut2 = np.abs(evals_tbl.DT.values - 2.) < 0.05
    cut_evals = evals_tbl[cut2].copy()
    lowLL = np.percentile(cut_evals.log_likelihood, 10.)
    hiLL = np.percentile(cut_evals.log_likelihood, 90.)
    low = cut_evals.log_likelihood < lowLL
    high = cut_evals.log_likelihood > hiLL
    fig = plt.figure()  #figsize=(14, 8))
    plt.clf()
    ax = plt.axes(projection=tformP)
    # Low
    lw = 0.5
    psize = 5.
    img = plt.scatter(
        x=cut_evals.longitude[low],
        y=cut_evals.latitude[low],
        edgecolors='b',
        facecolors='none',
        s=psize,
        lw=lw,
        transform=tformP)
    # High
    img = plt.scatter(
        x=cut_evals.longitude[high],
        y=cut_evals.latitude[high],
        edgecolors='r',
        facecolors='none',
        s=psize,
        lw=lw,
        transform=tformP)
    # Coast lines
    ax.coastlines(zorder=10)
    ax.set_global()
    # Layout and save
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))

def tst():
    import matplotlib.pyplot as plt
    import numpy as np
    import cartopy.crs as ccrs
    fig = plt.figure(figsize=(12, 8))
    plt.clf()
    ax = plt.axes(projection=ccrs.Mollweide())
    cm = plt.get_cmap('Greens')
    hp_lons = np.random.random(100) * 360 - 180
    hp_lats = np.random.random(100) * 90 - 45
    hp_events = np.random.random(100)
    # Cut down
    img = ax.tricontourf(hp_lons, hp_lats, hp_events, transform=ccrs.PlateCarree(),
                         levels=20, cmap=cm, zorder=10)
    # Colorbar
    cb = plt.colorbar(img, orientation='horizontal', pad=0.)
    clbl = r'$\log_{10} \, N$'
    cb.set_label(clbl, fontsize=20.)
    ax.coastlines(zorder=10)
    ax.set_global()
    plt.show()
def fig_auto_encode(outfile, iexample=4, vmnx=(-5, 5)):
    """
    Reconstruction image

    Parameters
    ----------
    outfile
    iexample
    vmnx

    Returns
    -------
    """
    all_evals_tbl = results.load_log_prob('std', feather=True)
    cherry = np.all([all_evals_tbl.filename.values == 'AQUA_MODIS.20100619T062008.L2.SST.nc',
                     all_evals_tbl.row.values == 253, all_evals_tbl.column.values == 924], axis=0)
    icherry = np.where(cherry)[0][0]
    # Replace
    example = all_evals_tbl.iloc[icherry]
    # Grab it
    field, mask = image_utils.grab_img(example, 'PreProc', ptype='std')
    fields = np.reshape(field, (1, 1, 64, 64))
    # Load up the model
    pae = model_utils.load('standard')
    # Reconstruct
    recons = pae.reconstruct(fields)
    # Plot
    fig = plt.figure(figsize=(10, 4))
    pal, cm = plotting.load_palette()
    plt.clf()
    gs = gridspec.GridSpec(1, 2)
    # Original
    ax1 = plt.subplot(gs[0])
    sns.heatmap(field[0, ...], ax=ax1, xticklabels=[], yticklabels=[], cmap=cm,
                vmin=vmnx[0], vmax=vmnx[1])
    # Reconstructed
    ax2 = plt.subplot(gs[1])
    sns.heatmap(recons[0, 0, ...], ax=ax2, xticklabels=[], yticklabels=[], cmap=cm,
                vmin=vmnx[0], vmax=vmnx[1])
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def fig_LL_SSTa(outfile):
    """
    LL distribution

    Parameters
    ----------
    outfile

    Returns
    -------
    """
    evals_tbl = results.load_log_prob('std', feather=True)
    logL = evals_tbl.log_likelihood.values
    isort = np.argsort(logL)
    LL_a = logL[isort[int(len(logL)*0.001)]]
    print("median logL = {}".format(np.median(logL)))
    # Plot
    fig = plt.figure(figsize=(10, 4))
    plt.clf()
    gs = gridspec.GridSpec(1, 1)
    # Original
    ax = plt.subplot(gs[0])
    low_logL = np.quantile(logL, 0.05)
    high_logL = np.quantile(logL, 0.95)
    sns.distplot(logL)
    plt.axvline(low_logL, linestyle='--', c='r')
    plt.axvline(high_logL, linestyle='--', c='r')
    fsz = 17.
    plt.xlabel('Log Likelihood (LL)', fontsize=fsz)
    plt.ylabel('Probability Density', fontsize=fsz)
    # Inset for lowest LL
    cut_LL = LL_a
    lowLL = logL < cut_LL
    axins = ax.inset_axes([0.1, 0.3, 0.57, 0.57])
    #axins.scatter(evals_tbl.date.values[lowLL], evals_tbl.log_likelihood.values[lowLL])
    #bins = np.arange(-6000., -1000., 250)
    #out_hist, out_bins = np.histogram(logL[lowLL], bins=bins)
    #embed(header='316 of figs')
    #axins.hist(logL[lowLL], color='k')
    axins.scatter(evals_tbl.log_likelihood.values[lowLL],
                  evals_tbl.date.values[lowLL], s=0.1)
    #axins.axvline(LL_a, color='k', ls='--')
    axins.set_xlim(-8000., cut_LL)
    axins.minorticks_on()
    axins.set_title('Outliers (lowest 0.1% in LL)')
    plt.gcf().autofmt_xdate()
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def fig_brazil(outfile='fig_brazil.png'):
    """
    Brazil

    Parameters
    ----------
    outfile

    Returns
    -------
    """
    evals_tbl = results.load_log_prob('std', feather=True)
    # Add in DT
    if 'DT' not in evals_tbl.keys():
        evals_tbl['DT'] = evals_tbl.T90 - evals_tbl.T10
    # Brazil
    in_brazil = ((np.abs(evals_tbl.longitude.values + 57.5) < 10.) &
                 (np.abs(evals_tbl.latitude.values + 43.0) < 10))
    in_DT = np.abs(evals_tbl.DT - 2.05) < 0.05
    evals_bz = evals_tbl[in_brazil & in_DT].copy()
    # Rectangles
    #R2 = dict(lon=-60., dlon=1.,
    #          lat=-41.5, dlat=1.5)
    R2 = dict(lon=-61.0, dlon=1.,
              lat=-45., dlat=2)
    R1 = dict(lon=-56.5, dlon=1.5,
              lat=-45, dlat=2)
    logL = evals_bz.log_likelihood.values
    lowLL_val = np.percentile(logL, 10.)
    hiLL_val = np.percentile(logL, 90.)
    # Plot
    fig = plt.figure(figsize=(8, 8))
    plt.clf()
    gs = gridspec.GridSpec(11, 11)
    tformP = ccrs.PlateCarree()
    ax_b = plt.subplot(gs[:5, :6], projection=tformP)
    ax_b.text(0.05, 1.03, '(a)', transform=ax_b.transAxes,
              fontsize=15, ha='left', color='k')
    # LL near Argentina!
    psize = 0.5
    cm = plt.get_cmap('coolwarm')
    img = plt.scatter(
        x=evals_bz.longitude,
        y=evals_bz.latitude,
        s=psize,
        c=evals_bz.log_likelihood,
        cmap=cm,
        vmin=lowLL_val,
        vmax=hiLL_val,
        transform=tformP)
    plt.ylabel('Latitude')
    plt.xlabel('Longitude')
    # Color bar
    cb = plt.colorbar(img, fraction=0.020, pad=0.04)
    cb.ax.set_title('LL', fontsize=11.)
    # Draw rectangles
    for lbl, R, ls in zip(['R1', 'R2'], [R1, R2], ['k-', 'k--']):
        xvals = R['lon']-R['dlon'], R['lon']+R['dlon'], R['lon']+R['dlon'], R['lon']-R['dlon'], R['lon']-R['dlon']
        yvals = R['lat']-R['dlat'], R['lat']-R['dlat'], R['lat']+R['dlat'], R['lat']+R['dlat'], R['lat']-R['dlat']
        ax_b.plot(xvals, yvals, ls, label=lbl)
    gl = ax_b.gridlines(crs=ccrs.PlateCarree(), linewidth=1,
                        color='black', alpha=0.5, linestyle='--', draw_labels=True)
    gl.xlabels_top = False
    gl.ylabels_left = True
    gl.ylabels_right = False
    gl.xlines = True
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'color': 'black'}  # 'weight': 'bold'}
    gl.ylabel_style = {'color': 'black'}  # 'weight': 'bold'}
    #gl.xlocator = mticker.FixedLocator([-180., -160, -140, -120, -60, -20.])
    #gl.xlocator = mticker.FixedLocator([-240., -180., -120, -65, -60, -55, 0, 60, 120.])
    #gl.ylocator = mticker.FixedLocator([0., 15., 30., 45, 60.])
    plt.gca().coastlines()
    # Bathymetry
    df_200 = pandas.read_csv('Patagonian_Bathymetry_200m.txt')
    cut_df200 = (df_200.lat > -50.) & (df_200.lon > -65.) & (df_200.lat < -33.)
    img2 = plt.scatter(
        x=df_200[cut_df200].lon,
        y=df_200[cut_df200].lat,
        s=0.05,
        color='green',
        transform=tformP, label='200m')
    '''
    df_2500 = pandas.read_csv('Patagonian_Bathymetry_2500m.txt')
    cut_df2500 = (df_2500.lat > -50.) & (df_2500.lon > -65.) & (df_2500.lat < -33.) & (
        df_2500.lon < -50.)
    img3 = plt.scatter(
        x=df_2500[cut_df2500].lon,
        y=df_2500[cut_df2500].lat,
        s=0.05,
        color='orange',
        transform=tformP, label='2500m')
    '''
    legend = plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
                        handletextpad=0.3, fontsize=11, numpoints=1)
    # ######################################################################
    # Histograms
    in_R1, in_R2 = [((np.abs(evals_bz.longitude.values - R['lon']) < R['dlon']) &
                     (np.abs(evals_bz.latitude.values - R['lat']) < R['dlat'])) for R in [R1, R2]]
    evals_bz['Subsample'] = 'null'
    evals_bz['Subsample'][in_R1] = 'R1'
    evals_bz['Subsample'][in_R2] = 'R2'
    df_rects = pandas.DataFrame(dict(
        LL=evals_bz.log_likelihood.values[in_R1 | in_R2],
        Subsample=evals_bz.Subsample.values[in_R1 | in_R2]))
    ax_h = plt.subplot(gs[:5, 8:])
    ax_h.text(0.05, 1.03, '(b)', transform=ax_h.transAxes,
              fontsize=15, ha='left', color='k')
    sns.histplot(data=df_rects, x='LL',
                 hue='Subsample', hue_order=['R1', 'R2'], ax=ax_h)
    ax_h.set_xlim(-800, 500)
    ax_h.set_xlabel('Log Likelihood (LL)')  #, fontsize=fsz)
    #plt.ylabel('Probability Density', fontsize=fsz)
    # Gallery
    nGal = 25
    #nGal = 1
    vmin, vmax = -1, 1
    pal, cm = plotting.load_palette()
    # R1
    idx_R1 = np.where(in_R1)[0]
    rand_R1 = np.random.choice(idx_R1, nGal, replace=False)
    for ss in range(nGal):
        example = evals_bz.iloc[rand_R1[ss]]
        field, mask = image_utils.grab_img(example, 'PreProc', ptype='std')
        # Axis
        row = 6 + ss//5
        col = 6 + ss % 5
        ax_0 = plt.subplot(gs[row, col])
        sns.heatmap(field[0], ax=ax_0, xticklabels=[], yticklabels=[], cmap=cm,
                    vmin=vmin, vmax=vmax, cbar=False)
    # R2
    idx_R2 = np.where(in_R2)[0]
    rand_R2 = np.random.choice(idx_R2, nGal, replace=False)
    for ss in range(nGal):
        example = evals_bz.iloc[rand_R2[ss]]
        field, mask = image_utils.grab_img(example, 'PreProc', ptype='std')
        # Axis
        row = 6 + ss//5
        col = ss % 5
        ax_0 = plt.subplot(gs[row, col])
        sns.heatmap(field[0], ax=ax_0, xticklabels=[], yticklabels=[], cmap=cm,
                    vmin=vmin, vmax=vmax, cbar=False)
    # Layout and save
    #plt.tight_layout(pad=0.0, h_pad=0.0, w_pad=0.0)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def fig_gallery(outfile, ptype, flavor='outlier'):
    all_evals_tbl = results.load_log_prob(ptype, feather=True)
    # Grab random outliers
    #years = [2008, 2009, 2011, 2012]
    years = np.arange(2003, 2020, 2)
    dyear = 2
    ngallery = 9
    if flavor == 'outlier':
        # Cut
        top = 1000
        isrt = np.argsort(all_evals_tbl.log_likelihood)
        evals_tbl = all_evals_tbl.iloc[isrt[0:top]]
    elif flavor == 'inlier':
        bottom = 1000
        isrt = np.argsort(all_evals_tbl.log_likelihood)
        evals_tbl = all_evals_tbl.iloc[isrt[-bottom:]]
    else:
        raise IOError("Bad flavor")
    gallery_tbl = results.random_imgs(evals_tbl, years, dyear)
    # Over-ride one?
    if flavor == 'outlier' and ptype == 'std':
        # AQUA_MODIS.20100619T062008.L2.SST.nc 253 924 40.497738 -59.93214 0.049987793 20.64104652 15.69499969 23.97500038 22.65999985 18.38500023 -1234.1112
        cherry = np.all([all_evals_tbl.filename.values == 'AQUA_MODIS.20100619T062008.L2.SST.nc',
                         all_evals_tbl.row.values == 253, all_evals_tbl.column.values == 924], axis=0)
        icherry = np.where(cherry)[0][0]
        # Replace
        gallery_tbl.iloc[3] = all_evals_tbl.iloc[icherry]
    if len(gallery_tbl) < ngallery:
        raise ValueError("Uh oh")
    # Plot
    pal, cm = plotting.load_palette()
    fig = plt.figure(figsize=(10, 8))
    plt.clf()
    gs = gridspec.GridSpec(3, 3)
    # Original
    for ss in range(ngallery):
        # Axis
        ax = plt.subplot(gs[ss])
        # Grab image
        example = gallery_tbl.iloc[ss]
        field, mask = image_utils.grab_img(example, 'PreProc', ptype=ptype)
        # Plot
        if ptype == 'loggrad':
            vmin, vmax = -5., 0.
        else:
            vmin, vmax = None, None
        sns.heatmap(field[0], ax=ax, xticklabels=[], yticklabels=[], cmap=cm,
                    vmin=vmin, vmax=vmax)
        # Label
        lsz = 17.
        lclr = 'white'
        ax.text(0.05, 0.90, '{}'.format(example.date.strftime('%Y-%m-%d')),
                transform=ax.transAxes, fontsize=lsz, ha='left', color=lclr)
        ax.text(0.05, 0.80, '{:0.3f},{:0.3f}'.format(example.longitude, example.latitude),
                transform=ax.transAxes, fontsize=lsz, ha='left', color=lclr)
    # Layout and save
    # plt.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def fig_LL_vs_DT(ptype, outfile, evals_tbl=None):
    #sns.set_theme()
    #sns.set_style('whitegrid')
    #sns.set_context('paper')
    # Load
    if evals_tbl is None:
        evals_tbl = results.load_log_prob(ptype, feather=True)
    # Add in DT
    if 'DT' not in evals_tbl.keys():
        evals_tbl['DT'] = evals_tbl.T90 - evals_tbl.T10
    # Stats
    cut2 = np.abs(evals_tbl.DT.values - 2.) < 0.05
    print("Min LL: {}".format(np.min(evals_tbl.log_likelihood[cut2])))
    print("Max LL: {}".format(np.max(evals_tbl.log_likelihood[cut2])))
    print("Mean LL: {}".format(np.mean(evals_tbl.log_likelihood[cut2])))
    print("RMS LL: {}".format(np.std(evals_tbl.log_likelihood[cut2])))
    # Bins
    bins_LL = np.linspace(-10000., 1100., 22)
    bins_DT = np.linspace(0., 14, 14)
    fig = plt.figure(figsize=(12, 8))
    plt.clf()
    gs = gridspec.GridSpec(1, 1)
    # Total NSpax
    ax_tot = plt.subplot(gs[0])
    jg = sns.jointplot(data=evals_tbl, x='DT', y='log_likelihood',
                       kind='hist', bins=200, marginal_kws=dict(bins=200))
    #jg.ax_marg_x.set_xlim(8, 10.5)
    #jg.ax_marg_y.set_ylim(0.5, 2.0)
    jg.ax_joint.set_xlabel(r'$\Delta T$ (K)')
    jg.ax_joint.set_ylabel(r'LL')
    xmnx = (0., 14.5)
    jg.ax_joint.set_xlim(xmnx[0], xmnx[1])
    #ymnx = (-11400., 1700)
    #jg.ax_joint.set_ylim(ymnx[0], ymnx[1])
    jg.ax_joint.minorticks_on()
    # Horizontal line
    lowLL_val = np.percentile(evals_tbl.log_likelihood, 0.1)
    jg.ax_joint.plot(xmnx, [lowLL_val]*2, '--', color='gray')
    '''
    # Vertical lines
    jg.ax_joint.plot([2.]*2, ymnx, '-', color='gray', lw=1)
    jg.ax_joint.plot([2.1]*2, ymnx, '-', color='gray', lw=1)
    '''
    set_fontsize(jg.ax_joint, 17.)
    #jg.ax_joint.yaxis.set_major_locator(plt.MultipleLocator(0.5))
    #jg.ax_joint.xaxis.set_major_locator(plt.MultipleLocator(1.0))
    # 2D hist
    #hist2d(evals_tbl.log_likelihood.values, evals_tbl.DT.values,
    #       bins=[bins_LL, bins_DT], ax=ax_tot, color='b')
    #ax_tot.set_xlabel('LL')
    #ax_tot.set_ylabel(r'$\Delta T$')
    #ax_tot.set_ylim(0.3, 5.0)
    #ax_tot.minorticks_on()
    #legend = plt.legend(loc='upper right', scatterpoints=1, borderpad=0.3,
    #                    handletextpad=0.3, fontsize=19, numpoints=1)
    #set_fontsize(ax_tot, 19.)
    # Layout and save
    plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.1)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))

def fig_LL_vs_LL(outfile, evals_tbl_std=None, evals_tbl_grad=None):
    # Load
    if evals_tbl_std is None:
        evals_tbl_std = results.load_log_prob('std', feather=True)
    if evals_tbl_grad is None:
        evals_tbl_grad = results.load_log_prob('loggrad', feather=True)
    # Outliers
    point1 = int(0.001 * len(evals_tbl_std))
    isortLL_std = np.argsort(evals_tbl_std.log_likelihood)
    outliers_std = evals_tbl_std.iloc[isortLL_std[0:point1]]
    isortLL_grad = np.argsort(evals_tbl_grad.log_likelihood)
    outliers_grad = evals_tbl_grad.iloc[isortLL_grad[0:point1]]
    # Std to grad
    mtchs = utils.match_ids(outliers_std.UID, evals_tbl_grad.UID, require_in_match=False)
    gd_LL = mtchs >= 0
    LL_grad_std = evals_tbl_grad.log_likelihood.values[mtchs[gd_LL]]
    LL_std = outliers_std.log_likelihood.values[gd_LL]
    mtchs2 = utils.match_ids(outliers_grad.UID, evals_tbl_std.UID, require_in_match=False)
    gd_LL2 = mtchs2 >= 0
    LL_std_grad = evals_tbl_std.log_likelihood.values[mtchs2[gd_LL2]]
    LL_grad = outliers_grad.log_likelihood.values[gd_LL2]
    '''
    # Grab em
    LL_grad = []
    for kk in range(len(outliers_std)):
        iobj = outliers_std.iloc[kk]
        gdate = evals_tbl_grad.date == iobj.date
        grow = evals_tbl_grad.row == iobj.row
        gcol = evals_tbl_grad.column == iobj.column
        idx = np.where(gdate & grow & gcol)[0]
        if len(idx) == 1:
            LL_grad.append(evals_tbl_grad.iloc[idx].log_likelihood.values[0])
        else:
            LL_grad.append(np.nan)
    '''
    fig = plt.figure(figsize=(12, 8))
    plt.clf()
    gs = gridspec.GridSpec(2, 1)
    #
    ax_std = plt.subplot(gs[0])
    ax_std.scatter(LL_std, LL_grad_std, s=0.2)
    ax_std.set_xlabel('LL SSTa 0.1% Outliers')
    ax_std.set_ylabel('LL_grad')
    ax_log = plt.subplot(gs[1])
    ax_log.scatter(LL_grad, LL_std_grad, s=0.2)
    ax_log.set_xlabel(r'LL $\nabla$SST 0.1% Outliers')
    ax_log.set_ylabel('LL_std')
    set_fontsize(ax_std, 19.)
    set_fontsize(ax_log, 19.)
    # Layout and save
    plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.1)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def fig_year_month(outfile, ptype, evals_tbl=None, frac=False,
                   all=False):
    """
    Time evolution in outliers

    Parameters
    ----------
    outfile
    ptype
    evals_tbl

    Returns
    -------
    """
    # Load
    if evals_tbl is None:
        evals_tbl = results.load_log_prob(ptype, feather=True)
        print("Loaded..")
    # Outliers
    point1 = int(0.001 * len(evals_tbl))
    isortLL = np.argsort(evals_tbl.log_likelihood)
    outliers = evals_tbl.iloc[isortLL[0:point1]]
    # All
    if all or frac:
        all_years = [item.year for item in evals_tbl.date]
        all_months = [item.month for item in evals_tbl.date]
    # Parse
    years = [item.year for item in outliers.date]
    months = [item.month for item in outliers.date]
    # Histogram
    bins_year = np.arange(2002.5, 2020.5)
    bins_month = np.arange(0.5, 13.5)
    counts, xedges, yedges = np.histogram2d(months, years,
                                            bins=(bins_month, bins_year))
    if all or frac:
        all_counts, _, _ = np.histogram2d(all_months, all_years,
                                          bins=(bins_month, bins_year))
    fig = plt.figure(figsize=(12, 8))
    plt.clf()
    gs = gridspec.GridSpec(5, 6)
    # Total NSpax
    ax_tot = plt.subplot(gs[1:, 1:-1])
    cm = plt.get_cmap('Blues')
    if frac:
        values = counts.transpose()/all_counts.transpose()
        lbl = 'Fraction'
    elif all:
        cm = plt.get_cmap('Greens')
        norm = np.sum(all_counts) / np.prod(all_counts.shape)
        values = all_counts.transpose()/norm
        lbl = 'Fraction (all)'
    else:
        values = counts.transpose()
        lbl = 'Counts'
    mplt = ax_tot.pcolormesh(xedges, yedges, values, cmap=cm)
    # Color bar
    cbaxes = fig.add_axes([0.03, 0.1, 0.05, 0.7])
    cb = plt.colorbar(mplt, cax=cbaxes, aspect=20)
    #cb.set_label(lbl, fontsize=20.)
    cbaxes.yaxis.set_ticks_position('left')
    cbaxes.set_xlabel(lbl, fontsize=15.)
    ax_tot.set_xlabel('Month')
    ax_tot.set_ylabel('Year')
    set_fontsize(ax_tot, 19.)
    # Edges
    fsz = 15.
    months = np.mean(values, axis=0)
    ax_m = plt.subplot(gs[0, 1:-1])
    ax_m.step(np.arange(12)+1, months, color='k', where='mid')
    set_fontsize(ax_m, fsz)
    #ax_m.minorticks_on()
    years = np.mean(values, axis=1)
    ax_y = plt.subplot(gs[1:, -1])
    ax_y.invert_xaxis()
    ax_y.step(years, 2003 + np.arange(17), color='k', where='mid')
    ax_y.set_xlim(40, 80)
    set_fontsize(ax_y, fsz)
    # Layout and save
    plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.1)
    plt.savefig(outfile, dpi=300)
    plt.close()
    print('Wrote {:s}'.format(outfile))
def set_fontsize(ax, fsz):
    '''
    Set the font size of the title, axis labels, and tick labels of an axis

    Parameters
    ----------
    ax : Matplotlib ax class
    fsz : float
      Font size
    '''
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fsz)
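# Illustrative usage sketch for set_fontsize(), assuming an axis that is
# already labelled:
#   fig, ax = plt.subplots()
#   ax.set_xlabel('Month')
#   ax.set_ylabel(r'$N$')
#   set_fontsize(ax, 15.)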
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, plot_datapoints=True, plot_density=True,
plot_contours=True, no_fill_contours=False, fill_contours=False,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
**kwargs):
"""
Plot a 2-D histogram of samples.
Parameters
----------
x, y : array_like (nsamples,)
The samples.
bins : int or list
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes (optional)
A axes instance on which to add the 2-D histogram.
plot_datapoints : bool (optional)
Draw the individual data points.
plot_density : bool (optional)
Draw the density colormap.
plot_contours : bool (optional)
Draw the contours.
no_fill_contours : bool (optional)
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool (optional)
Fill the contours.
contour_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict (optional)
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points.
"""
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from scipy.ndimage import gaussian_filter
if ax is None:
ax = plt.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
rgba_color = colorConverter.to_rgba(color)
contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
for i, l in enumerate(levels):
contour_cmap[i][-1] *= float(i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=range, weights=weights)
except ValueError:
embed(header='732 of figs')
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
        except IndexError:
V[i] = Hflat[0]
V.sort()
m = np.diff(V) == 0
if np.any(m):
print("Too few points to create valid contours")
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if (plot_contours or plot_density) and not no_fill_contours:
ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
# Plot the contour edge colors.
if plot_contours:
if contour_kwargs is None:
contour_kwargs = dict()
contour_kwargs["colors"] = contour_kwargs.get("colors", color)
ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
ax.set_xlim(range[0])
ax.set_ylim(range[1])
#### ########################## #########################
def main(flg_fig):
if flg_fig == 'all':
flg_fig = np.sum(np.array([2 ** ii for ii in range(25)]))
else:
flg_fig = int(flg_fig)
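    # e.g. a command-line argument of 5 (= 2**0 + 2**2) would produce both the
    # month histogram and the CC figure in a single run.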
# Month histogram
if flg_fig & (2 ** 0):
for outfile in ['fig_db_by_month.png', 'fig_db_by_month.pdf']:
fig_db_by_month(outfile)
# <T> histogram
if flg_fig & (2 ** 1):
for outfile in ['fig_db_by_meanT.png', 'fig_db_by_meanT.pdf']:
fig_db_by_meanT(outfile)
# CC figure
if flg_fig & (2 ** 2):
for outfile in ['fig_CC.png']: #, 'fig_CC.pdf']:
fig_CC(outfile)
# Spatial of all evaluations
if flg_fig & (2 ** 3):
#for outfile in ['fig_std_evals_spatial.png']:
# fig_spatial_all('std', outfile)
fig_spatial_outliers('std', 'fig_std_outliers_spatial.png')
# In-painting
if flg_fig & (2 ** 4):
for outfile in ['fig_in_painting.png']:
fig_in_painting(outfile)
# Auto-encode
if flg_fig & (2 ** 5):
for outfile in ['fig_auto_encode.png']:
fig_auto_encode(outfile)
# LL for SSTa
if flg_fig & (2 ** 6):
for outfile in ['fig_LL_SSTa.png']:
fig_LL_SSTa(outfile)
# Outlier gallery
if flg_fig & (2 ** 7):
# Outlier
#for ptype, outfile in zip(['std', 'loggrad'], ['fig_gallery_std.png', 'fig_gallery_loggrad.png']):
for ptype, outfile in zip(['std'], ['fig_gallery_std.png']):
fig_gallery(outfile, ptype)
# Inlier
#for ptype, outfile in zip(['std', 'loggrad'], ['fig_inlier_gallery_std.png', 'fig_inlier_gallery_loggrad.png']):
# fig_gallery(outfile, ptype, flavor='inlier')
# LL vs LL
if flg_fig & (2 ** 8):
for outfile in ['fig_LL_vs_LL.png']:
fig_LL_vs_LL(outfile)
# Year, Month
if flg_fig & (2 ** 9):
# Counts
#for ptype, outfile in zip(['std', 'loggrad'], ['fig_year_month_std.png', 'fig_year_month_loggrad.png']):
for ptype, outfile in zip(['std'], ['fig_year_month_std.png']):
fig_year_month(outfile, ptype)
# Fractional
#for ptype, outfile in zip(['std'], ['fig_year_month_std_frac.png']):
# fig_year_month(outfile, ptype, frac=True)
# All
#for ptype, outfile in zip(['std'], ['fig_year_month_std_all.png']):
# fig_year_month(outfile, ptype, all=True)
# LL vs. DT
if flg_fig & (2 ** 11):
#for ptype, outfile in zip(['std', 'loggrad'],
# ['fig_LL_vs_T_std.png',
# 'fig_LL_vs_T_loggrad.png']):
for ptype, outfile in zip(['std'], ['fig_LL_vs_T_std.png']):
fig_LL_vs_DT(ptype, outfile)
# Spatial of all evaluations
if flg_fig & (2 ** 12):
fig_inlier_vs_outlier()
# Brazil
if flg_fig & (2 ** 13):
fig_brazil()
# LL vs. DT
if flg_fig & (2 ** 20):
tst()
# Command line execution
if __name__ == '__main__':
if len(sys.argv) == 1:
flg_fig = 0
#flg_fig += 2 ** 0 # Month histogram
#flg_fig += 2 ** 1 # <T> histogram
#flg_fig += 2 ** 2 # CC fractions
#flg_fig += 2 ** 3 # All Evals spatial
#flg_fig += 2 ** 4 # In-painting
#flg_fig += 2 ** 5 # Auto-encode
#flg_fig += 2 ** 6 # LL SSTa
#flg_fig += 2 ** 7 # Gallery
#flg_fig += 2 ** 8 # LL_SST vs. LL_grad
#flg_fig += 2 ** 9 # year, month
#flg_fig += 2 ** 11 # LL vs DT
flg_fig += 2 ** 12 # inlier vs outlier for DT = 2
#flg_fig += 2 ** 13 # Brazil
#flg_fig += 2 ** 20 # tst
else:
flg_fig = sys.argv[1]
main(flg_fig)
| null |
papers/I/Figures/py/fig_sst_anomaly_flowI.py
|
fig_sst_anomaly_flowI.py
|
py
| 42,467 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.rcParams",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "ulmo.defs.extract_path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "ulmo.defs",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "ulmo.defs.model_path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "ulmo.defs",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "ulmo.defs.eval_path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "ulmo.defs",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "pandas.read_hdf",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.step",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "pandas.read_hdf",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.step",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "ulmo.analysis.cc.CC_values",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "ulmo.analysis.cc",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "results.load_log_prob",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils.grab_img",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "numpy.nan",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "ulmo.plotting.load_palette",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "ulmo.plotting",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils.evals_to_healpix",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "healpy.mollview",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils.evals_to_healpix",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.Mollweide",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "numpy.invert",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "cartopy.mpl.gridliner.LONGITUDE_FORMATTER",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "cartopy.mpl.gridliner.LATITUDE_FORMATTER",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.Mollweide",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "numpy.random.random",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 469,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 470,
"usage_type": "attribute"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils.grab_img",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 509,
"usage_type": "name"
},
{
"api_name": "numpy.reshape",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.models.load",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.models",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 518,
"usage_type": "name"
},
{
"api_name": "ulmo.plotting.load_palette",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "ulmo.plotting",
"line_number": 519,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 520,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 521,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 524,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 535,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 536,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 556,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 562,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 563,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 564,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 567,
"usage_type": "name"
},
{
"api_name": "numpy.quantile",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "numpy.quantile",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 572,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 573,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 575,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 576,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 593,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 598,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 599,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 641,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 641,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 642,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 642,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 643,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 645,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 646,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 653,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 663,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 664,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 666,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 675,
"usage_type": "name"
},
{
"api_name": "cartopy.mpl.gridliner.LONGITUDE_FORMATTER",
"line_number": 681,
"usage_type": "name"
},
{
"api_name": "cartopy.mpl.gridliner.LATITUDE_FORMATTER",
"line_number": 682,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 689,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 689,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 692,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 694,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 694,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 719,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 720,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 725,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 735,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 739,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 739,
"usage_type": "name"
},
{
"api_name": "seaborn.histplot",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "ulmo.plotting.load_palette",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "ulmo.plotting",
"line_number": 754,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 758,
"usage_type": "attribute"
},
{
"api_name": "ulmo.utils.image_utils.grab_img",
"line_number": 762,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 762,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 767,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 767,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 768,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 772,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 773,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 773,
"usage_type": "attribute"
},
{
"api_name": "ulmo.utils.image_utils.grab_img",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 777,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 782,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 783,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 788,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 788,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 789,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 789,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 798,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 805,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 809,
"usage_type": "call"
},
{
"api_name": "results.random_imgs",
"line_number": 814,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 821,
"usage_type": "call"
},
{
"api_name": "ulmo.plotting.load_palette",
"line_number": 829,
"usage_type": "call"
},
{
"api_name": "ulmo.plotting",
"line_number": 829,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 830,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 830,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 831,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 831,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 832,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 832,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 837,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 837,
"usage_type": "name"
},
{
"api_name": "ulmo.utils.image_utils.grab_img",
"line_number": 841,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.image_utils",
"line_number": 841,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 848,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 861,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 861,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 862,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 862,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 882,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 883,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 884,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 886,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 889,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 890,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 892,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 892,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 893,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 893,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 894,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 894,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 897,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 897,
"usage_type": "name"
},
{
"api_name": "seaborn.jointplot",
"line_number": 899,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 913,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 942,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 942,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 943,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 943,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 944,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 944,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 953,
"usage_type": "call"
},
{
"api_name": "results.load_log_prob",
"line_number": 955,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 959,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 962,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.utils.match_ids",
"line_number": 966,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.utils",
"line_number": 966,
"usage_type": "name"
},
{
"api_name": "ulmo.utils.utils.match_ids",
"line_number": 971,
"usage_type": "call"
},
{
"api_name": "ulmo.utils.utils",
"line_number": 971,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 991,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 991,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 992,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 992,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 993,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 993,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 996,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 996,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 1001,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1001,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 1010,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1010,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 1011,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1011,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 1012,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1012,
"usage_type": "name"
},
{
"api_name": "results.load_log_prob",
"line_number": 1035,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 1040,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1053,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1054,
"usage_type": "call"
},
{
"api_name": "numpy.histogram2d",
"line_number": 1056,
"usage_type": "call"
},
{
"api_name": "numpy.histogram2d",
"line_number": 1059,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 1062,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1062,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 1063,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1063,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 1064,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 1064,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 1067,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1067,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 1069,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1069,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 1074,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1074,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 1075,
"usage_type": "call"
},
{
"api_name": "numpy.product",
"line_number": 1075,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 1085,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1085,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 1097,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 1098,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1098,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 1099,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 1103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 1104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1104,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 1106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 1111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 1112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1112,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 1113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 1184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1184,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 1199,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1199,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.LinearSegmentedColormap.from_list",
"line_number": 1203,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.LinearSegmentedColormap",
"line_number": 1203,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.LinearSegmentedColormap.from_list",
"line_number": 1207,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.LinearSegmentedColormap",
"line_number": 1207,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.colorConverter.to_rgba",
"line_number": 1212,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.colorConverter",
"line_number": 1212,
"usage_type": "name"
},
{
"api_name": "numpy.histogram2d",
"line_number": 1219,
"usage_type": "call"
},
{
"api_name": "IPython.embed",
"line_number": 1222,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.gaussian_filter",
"line_number": 1228,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.gaussian_filter",
"line_number": 1230,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 1234,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 1236,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 1238,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1245,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 1246,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 1248,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 1249,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1250,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1257,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1267,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1268,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1268,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1270,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1270,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1272,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1273,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1273,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1275,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1275,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1298,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 1319,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1319,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 1413,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 1430,
"usage_type": "attribute"
}
] |
57533445
|
class_name = 'content_tab'
from qtpy import QtCore, QtGui, QtWidgets, uic
import os
import sys
import re
import pathlib
import json
from logzero import logger
from modules.sumologic import SumoLogic
from modules.shared import ShowTextDialog
class findReplaceCopyDialog(QtWidgets.QDialog):
def __init__(self, fromcategories, tocategories, parent=None):
super(findReplaceCopyDialog, self).__init__(parent)
self.objectlist = []
self.setupUi(self, fromcategories, tocategories)
def setupUi(self, Dialog, fromcategories, tocategories):
# setup static elements
Dialog.setObjectName("FindReplaceCopy")
Dialog.setMinimumWidth(700)
Dialog.setWindowTitle('Dynamically Replace Source Category Strings')
QBtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonBox = QtWidgets.QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
# set up the list of destination categories to populate into the comboboxes
itemmodel = QtGui.QStandardItemModel()
for tocategory in tocategories:
text_item = QtGui.QStandardItem(str(tocategory))
itemmodel.appendRow(text_item)
itemmodel.sort(0)
self.layoutSelections = QtWidgets.QGridLayout()
self.labelReplace = QtWidgets.QLabel()
self.labelReplace.setText("Replace")
self.layoutSelections.addWidget(self.labelReplace, 0, 0)
self.labelOriginal = QtWidgets.QLabel()
self.labelOriginal.setText("Original Source Category")
self.layoutSelections.addWidget(self.labelOriginal, 0, 1)
self.labelReplaceWith = QtWidgets.QLabel()
self.labelReplaceWith.setText("With:")
self.layoutSelections.addWidget(self.labelReplaceWith, 0, 2)
        # Create one set of (checkbox, label, combobox) per fromcategory
for index, fromcategory in enumerate(fromcategories):
objectdict = {'checkbox': None, 'label': None, 'combobox': None}
objectdict['checkbox'] = QtWidgets.QCheckBox()
objectdict['checkbox'].setObjectName("checkBox" + str(index))
objectdict['checkbox'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['checkbox'], index + 1, 0)
            objectdict['label'] = QtWidgets.QLabel()
            objectdict['label'].setObjectName("label" + str(index))
objectdict['label'].setText(fromcategory)
objectdict['label'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['label'], index + 1, 1)
objectdict['combobox'] = QtWidgets.QComboBox()
objectdict['combobox'].setObjectName("comboBox" + str(index))
objectdict['combobox'].setModel(itemmodel)
objectdict['combobox'].setEditable(True)
objectdict['combobox'].setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.layoutSelections.addWidget(objectdict['combobox'], index + 1, 2)
self.objectlist.append(objectdict)
self.groupBox = QtWidgets.QGroupBox()
self.groupBox.setLayout(self.layoutSelections)
        # Create a vertical scroll area with a grid layout inside with label headers
self.scrollArea = QtWidgets.QScrollArea()
self.scrollArea.setWidget(self.groupBox)
self.scrollArea.setWidgetResizable(True)
#self.scrollArea.setFixedHeight(400)
self.scrollArea.setMaximumHeight(500)
self.scrollArea.setMinimumWidth(700)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.scrollArea)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
def getresults(self):
results = []
        for obj in self.objectlist:
            if obj['checkbox'].isChecked():
                objectdata = {'from': str(obj['label'].text()), 'to': str(obj['combobox'].currentText())}
                results.append(objectdata)
return results
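# Sketch of how this dialog might be driven (hypothetical caller code):
#   dialog = findReplaceCopyDialog(from_categories, to_categories, parent=self)
#   if dialog.exec_():
#       replacements = dialog.getresults()  # [{'from': ..., 'to': ...}, ...]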
class content_tab(QtWidgets.QWidget):
def __init__(self, mainwindow):
super(content_tab, self).__init__()
self.mainwindow = mainwindow
self.tab_name = 'Content'
self.cred_usage = 'both'
content_widget_ui = os.path.join(self.mainwindow.basedir, 'data/content.ui')
uic.loadUi(content_widget_ui, self)
# Load icons used in the listviews
self.load_icons()
self.reset_stateful_objects()
        # Set up attributes that identify the content list widgets. These are read
        # by some of the content methods to determine the proper course of action.
self.contentListWidgetLeft.side = 'left'
self.contentListWidgetRight.side = 'right'
# Content Pane Signals
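        # Each signal is connected through a lambda so that credentials, region and
        # radio-button state are read at click time rather than frozen at setup time.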
# Left Side
self.pushButtonUpdateContentLeft.clicked.connect(lambda: self.updatecontentlist(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.contentListWidgetLeft.itemDoubleClicked.connect(lambda item: self.doubleclickedcontentlist(
item,
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.pushButtonParentDirContentLeft.clicked.connect(lambda: self.parentdircontentlist(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.buttonGroupContentLeft.buttonClicked.connect(lambda: self.contentradiobuttonchanged(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft,
self.pushButtonContentDeleteLeft
))
self.pushButtonContentNewFolderLeft.clicked.connect(lambda: self.create_folder(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.pushButtonContentDeleteLeft.clicked.connect(lambda: self.delete_content(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.pushButtonContentCopyLeftToRight.clicked.connect(lambda: self.copycontent(
self.contentListWidgetLeft,
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentLeft.checkedId(),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.pushButtonContentFindReplaceCopyLeftToRight.clicked.connect(lambda: self.findreplacecopycontent(
self.contentListWidgetLeft,
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentLeft.checkedId(),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
# Right Side
self.pushButtonUpdateContentRight.clicked.connect(lambda: self.updatecontentlist(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.contentListWidgetRight.itemDoubleClicked.connect(lambda item: self.doubleclickedcontentlist(
item,
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.pushButtonParentDirContentRight.clicked.connect(lambda: self.parentdircontentlist(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.buttonGroupContentRight.buttonClicked.connect(lambda: self.contentradiobuttonchanged(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight,
self.pushButtonContentDeleteRight
))
self.pushButtonContentNewFolderRight.clicked.connect(lambda: self.create_folder(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.pushButtonContentDeleteRight.clicked.connect(lambda: self.delete_content(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.pushButtonContentCopyRightToLeft.clicked.connect(lambda: self.copycontent(
self.contentListWidgetRight,
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentRight.checkedId(),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.pushButtonContentFindReplaceCopyRightToLeft.clicked.connect(lambda: self.findreplacecopycontent(
self.contentListWidgetRight,
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentRight.checkedId(),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.pushButtonContentBackupLeft.clicked.connect(lambda: self.backupcontent(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId()
))
self.pushButtonContentBackupRight.clicked.connect(lambda: self.backupcontent(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId()
))
self.pushButtonContentRestoreLeft.clicked.connect(lambda: self.restorecontent(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId(),
self.contentCurrentDirLabelLeft
))
self.pushButtonContentRestoreRight.clicked.connect(lambda: self.restorecontent(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId(),
self.contentCurrentDirLabelRight
))
self.pushButtonContentViewJSONLeft.clicked.connect(lambda: self.view_json(
self.contentListWidgetLeft,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionLeft.currentText())],
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
self.buttonGroupContentLeft.checkedId()
))
self.pushButtonContentViewJSONRight.clicked.connect(lambda: self.view_json(
self.contentListWidgetRight,
self.mainwindow.loadedapiurls[str(self.mainwindow.comboBoxRegionRight.currentText())],
str(self.mainwindow.lineEditUserNameRight.text()),
str(self.mainwindow.lineEditPasswordRight.text()),
self.buttonGroupContentRight.checkedId()
))
def reset_stateful_objects(self, side='both'):
if side == 'both':
left = True
right = True
if side == 'left':
left = True
right = False
if side == 'right':
left = False
right = True
if left:
self.contentListWidgetLeft.clear()
self.contentListWidgetLeft.currentcontent = {}
self.contentListWidgetLeft.currentdirlist = []
self.contentListWidgetLeft.updated = False
if right:
self.contentListWidgetRight.clear()
self.contentListWidgetRight.currentcontent = {}
self.contentListWidgetRight.currentdirlist = []
self.contentListWidgetRight.updated = False
def load_icons(self):
self.icons = {}
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/folder.svg'))
self.icons['Folder'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/dashboard.svg'))
self.icons['Dashboard'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/logsearch.svg'))
self.icons['Search'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/scheduledsearch.svg'))
self.icons['scheduledsearch'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/correlationrules.svg'))
self.icons['Rule'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/informationmodel.svg'))
self.icons['Model'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/lookuptable.svg'))
self.icons['Lookups'] = QtGui.QIcon(iconpath)
iconpath = str(pathlib.Path(self.mainwindow.basedir + '/data/parser.svg'))
self.icons['Parser'] = QtGui.QIcon(iconpath)
return
# Thanks Stackoverflow. Yoink!
def find_keys(self, obj, key):
"""Pull all values of specified key from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
results = extract(obj, arr, key)
return results
def recurse_replace_query_strings(self, query_string_replacement_list, exported_json):
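"""Recursively walk an exported content tree and apply every from->to substitution to each query string it contains."""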
if exported_json['type'] == "SavedSearchWithScheduleSyncDefinition":
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in exported_json['search']['queryText']:
exported_json['search']['queryText'] = exported_json['search']['queryText'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
return exported_json
elif exported_json['type'] == "DashboardSyncDefinition":
for panelnum, panel in enumerate(exported_json['panels'], start=0):
if panel['viewerType'] == "metrics": # there can be multiple query strings so we have an extra loop here
for querynum, metrics_query in enumerate(panel['metricsQueries'], start=0):
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in metrics_query['query']:
metrics_query['query'] = metrics_query['query'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
panel['metricsQueries'][querynum] = metrics_query
else: # if panel is a log panel
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in panel['queryString']:
panel['queryString'] = panel['queryString'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
exported_json['panels'][panelnum] = panel
return exported_json
elif exported_json['type'] == "DashboardV2SyncDefinition": # if it's a new style dashboard
for panelnum, panel in enumerate(exported_json['panels'], start=0):
for querynum, query in enumerate(panel['queries']):
for query_string_replacement in query_string_replacement_list:
if query_string_replacement['from'] in query['queryString']:
query['queryString'] = query['queryString'].replace(
str(query_string_replacement['from']),
str(query_string_replacement['to']))
break
panel['queries'][querynum] = query
exported_json['panels'][panelnum] = panel
return exported_json
elif exported_json['type'] == "FolderSyncDefinition":
children = []
for child in exported_json['children']:
children.append(self.recurse_replace_query_strings(query_string_replacement_list, child))
exported_json['children'] = children
return exported_json
# Start methods for Content Tab
def findreplacecopycontent(self, ContentListWidgetFrom, ContentListWidgetTo, fromurl, fromid, fromkey, tourl,
toid, tokey,
fromradioselected, toradioselected, todirectorylabel):
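"""Export the selected items, let the user define source-category find/replace pairs, then import the rewritten content into the destination folder."""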
logger.info("[Content] Copying Content")
selecteditemsfrom = ContentListWidgetFrom.selectedItems()
if toradioselected == -3 or toradioselected == -4: # Admin or Global folders selected
toadminmode = True
else:
toadminmode = False
if fromradioselected == -3 or fromradioselected == -4: # Admin or Global folders selected
fromadminmode = True
else:
fromadminmode = False
if len(selecteditemsfrom) > 0: # make sure something was selected
try:
exportsuccessful = False
fromsumo = SumoLogic(fromid, fromkey, endpoint=fromurl, log_level=self.mainwindow.log_level)
tosumo = SumoLogic(toid, tokey, endpoint=tourl, log_level=self.mainwindow.log_level)
contents = []
for selecteditem in selecteditemsfrom:
item_id = selecteditem.details['id']
contents.append(fromsumo.export_content_job_sync(item_id, adminmode=fromadminmode))
exportsuccessful = True
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong with the Source:\n\n' + str(e))
return
if exportsuccessful:
categoriesfrom = []
for content in contents:
query_list = self.find_keys(content, 'queryText')
query_list = query_list + self.find_keys(content, 'query')
query_list = query_list + self.find_keys(content, 'queryString')
for query in query_list:
categoriesfrom = categoriesfrom + re.findall(r'_sourceCategory\s*=\s*\\?\"?([^\s^"^)]*)\"?',
query)
# contentstring = json.dumps(content)
# categoriesfrom = categoriesfrom + re.findall(r'\"_sourceCategory\s*=\s*\\?\"?([^\s\\|]*)',
# contentstring)
uniquecategoriesfrom = list(set(categoriesfrom)) # dedupe the list
try:
fromtime = str(QtCore.QDateTime.currentDateTime().addSecs(-3600).toString(QtCore.Qt.ISODate))
totime = str(QtCore.QDateTime.currentDateTime().toString(QtCore.Qt.ISODate))
# We query the destination org to get a sample of active source categories
query = r'* | count by _sourceCategory | fields _sourceCategory'
searchresults = tosumo.search_job_records_sync(query, fromTime=fromtime, toTime=totime,
timeZone='UTC', byReceiptTime='false')
categoriesto = []
for record in searchresults:
categoriesto.append(record['map']['_sourcecategory'])
uniquecategoriesto = list(set(categoriesto))
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong with the Destination:\n\n' + str(e))
return
dialog = findReplaceCopyDialog(uniquecategoriesfrom, uniquecategoriesto)
dialog.exec()
dialog.show()
if str(dialog.result()) == '1':
replacelist = dialog.getresults()
logger.info(replacelist)
dialog.close()
if len(replacelist) > 0:
newcontents = []
for content in contents:
newcontents.append(self.recurse_replace_query_strings(replacelist, content))
# for entry in replacelist:
# contentstring = json.dumps(content)
# contentstring = contentstring.replace(str(entry['from']), str(entry['to']))
# logger.info(contentstring)
# newcontents.append(json.loads(contentstring))
else:
newcontents = contents
try:
tofolderid = ContentListWidgetTo.currentcontent['id']
for newcontent in newcontents:
status = tosumo.import_content_job_sync(tofolderid, newcontent, adminmode=toadminmode)
self.updatecontentlist(ContentListWidgetTo, tourl, toid, tokey, toradioselected,
todirectorylabel)
return
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong with the Destination:\n\n' + str(e))
return
else:
dialog.close()
return
else:
self.mainwindow.errorbox('You have not made any selections.')
return
return
def copycontent(self, ContentListWidgetFrom, ContentListWidgetTo, fromurl, fromid, fromkey, tourl, toid, tokey,
fromradioselected, toradioselected, todirectorylabel):
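"""Export each selected item from the source org and import it into the destination's current folder."""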
logger.info("[Content] Copying Content")
if toradioselected == -3 or toradioselected == -4: # Admin or Global folders selected
toadminmode = True
else:
toadminmode = False
if fromradioselected == -3 or fromradioselected == -4: # Admin or Global folders selected
fromadminmode = True
else:
fromadminmode = False
try:
selecteditems = ContentListWidgetFrom.selectedItems()
if len(selecteditems) > 0: # make sure something was selected
fromsumo = SumoLogic(fromid, fromkey, endpoint=fromurl, log_level=self.mainwindow.log_level)
tosumo = SumoLogic(toid, tokey, endpoint=tourl, log_level=self.mainwindow.log_level)
currentdir = ContentListWidgetTo.currentdirlist[-1]
tofolderid = ContentListWidgetTo.currentcontent['id']
for selecteditem in selecteditems:
item_id = selecteditem.details['id']
content = fromsumo.export_content_job_sync(item_id, adminmode=fromadminmode)
status = tosumo.import_content_job_sync(tofolderid, content, adminmode=toadminmode)
self.updatecontentlist(ContentListWidgetTo, tourl, toid, tokey, toradioselected, todirectorylabel)
return
else:
self.mainwindow.errorbox('You have not made any selections.')
return
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
return
def create_folder(self, ContentListWidget, url, id, key, radioselected, directorylabel):
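"""Prompt for a folder name and create it inside the currently displayed folder."""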
if ContentListWidget.updated:
if radioselected == -3 or radioselected == -4: # Admin or Global folders selected
adminmode = True
else:
adminmode = False
message = '''
Please enter the name of the folder you wish to create:
'''
text, result = QtWidgets.QInputDialog.getText(self, 'Create Folder...', message)
if result:
for item in ContentListWidget.currentcontent['children']:
if item['name'] == str(text):
self.mainwindow.errorbox('That Directory Name Already Exists!')
return
try:
logger.info("Creating New Folder in Personal Folder Tree")
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
error = sumo.create_folder(str(text), str(ContentListWidget.currentcontent['id']),
adminmode=adminmode)
self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel)
return
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
else:
self.mainwindow.errorbox("Please update the directory list before trying to create a new folder.")
return
def delete_content(self, ContentListWidget, url, id, key, radioselected, directorylabel):
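"""Delete the selected items, but only after the user types DELETE to confirm."""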
logger.info("Deleting Content")
if radioselected == -3 or radioselected == -4: # Admin or Global folders selected
adminmode = True
else:
adminmode = False
selecteditems = ContentListWidget.selectedItems()
if len(selecteditems) > 0: # make sure something was selected
message = "You are about to delete the following item(s):\n\n"
for selecteditem in selecteditems:
message = message + str(selecteditem.text()) + "\n"
message = message + '''
This is exceedingly DANGEROUS!!!!
Please be VERY, VERY, VERY sure you want to do this!
You could lose quite a bit of work if you delete the wrong thing(s).
If you are absolutely sure, type "DELETE" in the box below.
'''
text, result = QtWidgets.QInputDialog.getText(self, 'Warning!!', message)
if (result and (str(text) == 'DELETE')):
try:
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
for selecteditem in selecteditems:
item_id = selecteditem.details['id']
result = sumo.delete_content_job_sync(item_id, adminmode=adminmode)
self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel)
return
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
else:
self.mainwindow.errorbox('You need to select something before you can delete it.')
return
def contentradiobuttonchanged(self, ContentListWidget, url, id, key, radioselected, directorylabel,
pushButtonContentDelete):
ContentListWidget.currentdirlist = []
self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel)
return
def togglecontentbuttons(self, side, state):
if side == 'left':
self.pushButtonContentCopyRightToLeft.setEnabled(state)
self.pushButtonContentFindReplaceCopyRightToLeft.setEnabled(state)
self.pushButtonContentNewFolderLeft.setEnabled(state)
self.pushButtonContentDeleteLeft.setEnabled(state)
self.pushButtonContentBackupLeft.setEnabled(state)
self.pushButtonContentRestoreLeft.setEnabled(state)
elif side == 'right':
self.pushButtonContentCopyLeftToRight.setEnabled(state)
self.pushButtonContentFindReplaceCopyLeftToRight.setEnabled(state)
self.pushButtonContentNewFolderRight.setEnabled(state)
self.pushButtonContentDeleteRight.setEnabled(state)
self.pushButtonContentBackupRight.setEnabled(state)
self.pushButtonContentRestoreRight.setEnabled(state)
def updatecontentlist(self, ContentListWidget, url, id, key, radioselected, directorylabel):
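"""Refresh the cached content for the selected folder tree (Personal, Global, or Admin Recommended) and redraw the list."""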
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
if ContentListWidget.currentdirlist:
currentdir = ContentListWidget.currentdirlist[-1]
else:
currentdir = {'name': None, 'id': 'TOP'}
try:
if (not ContentListWidget.currentcontent) or (currentdir['id'] == 'TOP'):
if radioselected == -2: # if "Personal Folder" radio button is selected
logger.info("[Content] Updating Personal Folder List")
ContentListWidget.currentcontent = sumo.get_personal_folder()
ContentListWidget.currentdirlist = []
dir = {'name': 'Personal Folder', 'id': 'TOP'}
ContentListWidget.currentdirlist.append(dir)
if 'children' in ContentListWidget.currentcontent:
self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel)
else:
self.mainwindow.errorbox('Incorrect Credentials or Wrong Endpoint.')
elif radioselected == -3: # if "Global Folders" radio button is selected
logger.info("[Content] Updating Global Folder List")
ContentListWidget.currentcontent = sumo.get_global_folder_sync(adminmode=True)
# Rename dict key from "data" to "children" for consistency
ContentListWidget.currentcontent['children'] = ContentListWidget.currentcontent.pop('data')
ContentListWidget.currentdirlist = []
dir = {'name': 'Global Folders', 'id': 'TOP'}
ContentListWidget.currentdirlist.append(dir)
if 'children' in ContentListWidget.currentcontent:
self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel)
else:
self.mainwindow.errorbox('Incorrect Credentials or Wrong Endpoint.')
else: # "Admin Folders" must be selected
logger.info("[Content] Updating Admin Folder List")
ContentListWidget.currentcontent = sumo.get_admin_folder_sync(adminmode=True)
ContentListWidget.currentdirlist = []
dir = {'name': 'Admin Recommended', 'id': 'TOP'}
ContentListWidget.currentdirlist.append(dir)
if 'children' in ContentListWidget.currentcontent:
self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel)
else:
self.mainwindow.errorbox('Incorrect Credentials or Wrong Endpoint.')
else:
if radioselected == -3 or radioselected == -4:
adminmode = True
else:
adminmode = False
ContentListWidget.currentcontent = sumo.get_folder(currentdir['id'], adminmode=adminmode)
self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel)
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
return
return
def doubleclickedcontentlist(self, item, ContentListWidget, url, id, key, radioselected, directorylabel):
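"""Descend into the double-clicked folder and refresh the list widget."""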
logger.info("[Content] Going Down One Content Folder")
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
if radioselected == -3 or radioselected == -4:
adminmode = True
else:
adminmode = False
try:
if item.details['itemType'] == 'Folder':
ContentListWidget.currentcontent = sumo.get_folder(item.details['id'], adminmode=adminmode)
dir = {'name': item.text(), 'id': item.details['id']}
ContentListWidget.currentdirlist.append(dir)
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
self.updatecontentlistwidget(ContentListWidget, url, id, key, radioselected, directorylabel)
def parentdircontentlist(self, ContentListWidget, url, id, key, radioselected, directorylabel):
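"""Move up one level in the folder tree, unless we are already at the top."""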
if ContentListWidget.updated:
logger.info("[Content] Going Up One Content Folder")
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
currentdir = ContentListWidget.currentdirlist[-1]
if currentdir['id'] != 'TOP':
parentdir = ContentListWidget.currentdirlist[-2]
else:
return
try:
if parentdir['id'] == 'TOP':
ContentListWidget.currentdirlist = []
self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel)
return
else:
ContentListWidget.currentdirlist.pop()
if radioselected == -3 or radioselected == -4:
adminmode = True
else:
adminmode = False
ContentListWidget.currentcontent = sumo.get_folder(parentdir['id'], adminmode=adminmode)
self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel)
return
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
return
def updatecontentlistwidget(self, ContentListWidget, url, id, key, radioselected, directorylabel):
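"""Redraw the list widget from the cached folder content, choosing an icon per item type."""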
try:
ContentListWidget.clear()
for child in ContentListWidget.currentcontent['children']:
item_name = ''
# if radioselected == -3:
# logger.info("Getting User info for Global Folder")
# user_info = sumo.get_user(child['createdBy'])
# item_name = '[' + user_info['firstName'] + ' ' + user_info['lastName'] + ']'
item_name = item_name + child['name']
if child['itemType'] == 'Folder':
item = QtWidgets.QListWidgetItem(self.icons['Folder'], item_name)
elif child['itemType'] == 'Search':
item = QtWidgets.QListWidgetItem(self.icons['Search'], item_name)
elif child['itemType'] == 'Dashboard' or child['itemType'] == 'Report':
item = QtWidgets.QListWidgetItem(self.icons['Dashboard'], item_name)
elif child['itemType'] == 'Lookups':
item = QtWidgets.QListWidgetItem(self.icons['Lookups'], item_name)
else:
item = QtWidgets.QListWidgetItem(item_name) # no icon for unknown item types (fallthrough)
# attach the object details to the list widget entry; this makes life much easier later
item.details = child
ContentListWidget.addItem(item) # populate the list widget in the GUI
dirname = ''
for dir in ContentListWidget.currentdirlist:
dirname = dirname + '/' + dir['name']
directorylabel.setText(dirname)
ContentListWidget.updated = True
# if we are in the root (Top) of the global folders then we can't manipulate stuff as the entries are actually users, not content
# so turn off the buttons until we change folder type or move down a level
currentdir = ContentListWidget.currentdirlist[-1]
if currentdir['id'] == 'TOP' and radioselected == -3:
self.togglecontentbuttons(ContentListWidget.side, False)
else:
self.togglecontentbuttons(ContentListWidget.side, True)
except Exception as e:
ContentListWidget.clear()
ContentListWidget.updated = False
logger.exception(e)
return
def backupcontent(self, ContentListWidget, url, id, key, radioselected):
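"""Export each selected item and save it to disk as <name>.sumocontent.json."""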
logger.info("[Content] Backing Up Content")
if radioselected == -3 or radioselected == -4: # Admin or Global folders selected
adminmode = True
else:
adminmode = False
selecteditems = ContentListWidget.selectedItems()
if len(selecteditems) > 0: # make sure something was selected
savepath = str(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Backup Directory"))
if os.access(savepath, os.W_OK):
message = ''
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
for selecteditem in selecteditems:
item_id = selecteditem.details['id']
try:
content = sumo.export_content_job_sync(item_id, adminmode=adminmode)
savefilepath = pathlib.Path(savepath + r'/' + str(selecteditem.text()) + r'.sumocontent.json')
if savefilepath:
with savefilepath.open(mode='w') as filepointer:
json.dump(content, filepointer)
message = message + str(selecteditem.text()) + r'.sumocontent.json' + '\n'
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
return
self.mainwindow.infobox('Wrote files: \n\n' + message)
else:
self.mainwindow.errorbox("You don't have permissions to write to that directory")
else:
self.mainwindow.errorbox('No content selected.')
return
def restorecontent(self, ContentListWidget, url, id, key, radioselected, directorylabel):
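"""Import one or more exported JSON files into the currently displayed folder."""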
logger.info("[Content] Restoring Content")
if ContentListWidget.updated:
if 'id' in ContentListWidget.currentcontent: # make sure the current folder has a folder id
filter = "JSON (*.json)"
filelist, status = QtWidgets.QFileDialog.getOpenFileNames(self, "Open file(s)...", os.getcwd(),
filter)
if len(filelist) > 0:
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
for file in filelist:
try:
with open(file) as filepointer:
content = json.load(filepointer)
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox(
"Something went wrong reading the file. Do you have the right file permissions? Does it contain valid JSON?")
return
try:
folder_id = ContentListWidget.currentcontent['id']
if radioselected == -4 or radioselected == -3: # Admin Recommended Folders or Global folders Selected
adminmode = True
else:
adminmode = False
sumo.import_content_job_sync(folder_id, content, adminmode=adminmode)
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
return
self.updatecontentlist(ContentListWidget, url, id, key, radioselected, directorylabel)
else:
self.mainwindow.errorbox("You can't restore content to this folder. Does it belong to another user?")
return
else:
self.mainwindow.errorbox("Please update the directory list before restoring content")
return
def view_json(self, ContentListWidget, url, id, key, radioselected):
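"""Export the selected items and show their JSON in a read-only text window."""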
logger.info("[Content] Viewing JSON")
if radioselected == -3 or radioselected == -4: # Admin or Global folders selected
adminmode = True
else:
adminmode = False
selecteditems = ContentListWidget.selectedItems()
if len(selecteditems) > 0: # make sure something was selected
json_text = ''
sumo = SumoLogic(id, key, endpoint=url, log_level=self.mainwindow.log_level)
for selecteditem in selecteditems:
item_id = selecteditem.details['id']
try:
content = sumo.export_content_job_sync(item_id, adminmode=adminmode)
json_text = json_text + json.dumps(content, indent=4, sort_keys=True) + '\n\n'
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
return
self.json_window = ShowTextDialog('JSON', json_text, self.mainwindow.basedir)
self.json_window.show()
else:
self.mainwindow.errorbox('No content selected.')
return
| null |
modules/content_tab.py
|
content_tab.py
|
py
| 47,992 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "qtpy.QtWidgets.QDialog",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QDialogButtonBox",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QDialogButtonBox",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "qtpy.QtGui.QStandardItemModel",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "qtpy.QtGui.QStandardItem",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QGridLayout",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QLabel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QLabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QLabel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QCheckBox",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QSizePolicy",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QLabel",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QSizePolicy",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QComboBox",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QSizePolicy",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QGroupBox",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QScrollArea",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QVBoxLayout",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QWidget",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "qtpy.uic.loadUi",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "qtpy.uic",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 368,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui.QIcon",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "qtpy.QtGui",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "qtpy.QtCore.QDateTime.currentDateTime",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "qtpy.QtCore.QDateTime",
"line_number": 515,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtCore",
"line_number": 515,
"usage_type": "name"
},
{
"api_name": "qtpy.QtCore.Qt",
"line_number": 515,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtCore.QDateTime.currentDateTime",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "qtpy.QtCore.QDateTime",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtCore",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "qtpy.QtCore.Qt",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "logzero.logger.exception",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 536,
"usage_type": "name"
},
{
"api_name": "logzero.logger.exception",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 560,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 576,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 605,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QInputDialog.getText",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets.QInputDialog",
"line_number": 620,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 620,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 628,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 637,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 645,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QInputDialog.getText",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets.QInputDialog",
"line_number": 664,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 664,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 676,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 676,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 706,
"usage_type": "call"
},
{
"api_name": "logzero.logger.info",
"line_number": 715,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 715,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 728,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 743,
"usage_type": "name"
},
{
"api_name": "logzero.logger.exception",
"line_number": 768,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 768,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 775,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 788,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 788,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 794,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 819,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QListWidgetItem",
"line_number": 835,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 835,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QListWidgetItem",
"line_number": 837,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 837,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QListWidgetItem",
"line_number": 839,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 839,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QListWidgetItem",
"line_number": 841,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 841,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QListWidgetItem",
"line_number": 843,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 843,
"usage_type": "name"
},
{
"api_name": "logzero.logger.exception",
"line_number": 864,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 864,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 868,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QFileDialog.getExistingDirectory",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets.QFileDialog",
"line_number": 875,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 875,
"usage_type": "name"
},
{
"api_name": "os.access",
"line_number": 876,
"usage_type": "call"
},
{
"api_name": "os.W_OK",
"line_number": 876,
"usage_type": "attribute"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 878,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 883,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 886,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 889,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 889,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 901,
"usage_type": "name"
},
{
"api_name": "qtpy.QtWidgets.QFileDialog.getOpenFileNames",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "qtpy.QtWidgets.QFileDialog",
"line_number": 905,
"usage_type": "attribute"
},
{
"api_name": "qtpy.QtWidgets",
"line_number": 905,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 908,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 912,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 916,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 916,
"usage_type": "name"
},
{
"api_name": "logzero.logger.exception",
"line_number": 928,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 928,
"usage_type": "name"
},
{
"api_name": "logzero.logger.info",
"line_number": 942,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 942,
"usage_type": "name"
},
{
"api_name": "modules.sumologic.SumoLogic",
"line_number": 950,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 955,
"usage_type": "call"
},
{
"api_name": "logzero.logger.exception",
"line_number": 957,
"usage_type": "call"
},
{
"api_name": "logzero.logger",
"line_number": 957,
"usage_type": "name"
},
{
"api_name": "modules.shared.ShowTextDialog",
"line_number": 960,
"usage_type": "call"
}
] |
5969881
|
import folium
import pandas
import math
data_k = pandas.read_excel("data_k.xlsx",sheet_name=0)
data_e = pandas.read_excel("data_e.xlsx",sheet_name=0)
data_v = pandas.read_excel("data_v.xlsx",sheet_name=0)
basic = ["show_name","country_name","country_id","population", "latitude", "longitude"]
basic2 = ["show_name","country_name","country_id","population", "county", "latitude", "longitude"]
def popuptext():
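"""Build the popup text (city / country / population) from the enclosing loop's variables; labels are Hungarian."""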
if not math.isnan(pop):
return "Város: "+str(name)+" Ország: "+str(id)+", "+str(ctr)+" Népesség: "+str(int(pop))
else:
return "Város: "+str(name)+" Ország: "+str(id)+", "+str(ctr)+" Népesség: N/A"
def popuptext_k():
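"""Same as popuptext(), but also includes the county ("Megye") field."""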
if not math.isnan(pop):
return "Város: "+str(name)+" Ország: "+str(id)+", "+str(ctr)+" Megye: "+str(cty)+" Népesség: "+str(int(pop))
else:
return "Város: "+str(name)+" Ország: "+str(id)+", "+str(ctr)+" Megye: "+str(cty)+" Népesség: N/A"
array_k = []
for i in basic2:
array_k.append(list(data_k[i]))
array_e = []
for i in basic:
array_e.append(list(data_e[i]))
array_v = []
for i in basic:
array_v.append(list(data_v[i]))
world_map = folium.Map(location=[17.634008, 10.701508], zoom_start=3, tiles="Mapbox Bright")
fg_k = folium.FeatureGroup(name="Kárpát-medence")
for name, ctr, id, pop, cty, lat, lon in zip(array_k[0],array_k[1],array_k[2],array_k[3],array_k[4],array_k[5],array_k[6]):
fg_k.add_child(folium.CircleMarker(location=[lat, lon], radius = 6 , popup=folium.Popup(popuptext_k(),parse_html=True),
fill_color="blue", fill=True, color = 'grey', fill_opacity=0.7))
fg_e = folium.FeatureGroup(name="Európa")
for name, ctr, id, pop, lat, lon in zip(array_e[0],array_e[1],array_e[2],array_e[3],array_e[4],array_e[5]):
fg_e.add_child(folium.CircleMarker(location=[lat, lon], radius = 6 , popup=folium.Popup(popuptext(),parse_html=True),
fill_color="yellow", fill=True, color = 'grey', fill_opacity=0.7))
fg_v = folium.FeatureGroup(name="Világ")
for name, ctr, id, pop, lat, lon in zip(array_v[0],array_v[1],array_v[2],array_v[3],array_v[4],array_v[5]):
fg_v.add_child(folium.CircleMarker(location=[lat, lon], radius = 6 , popup=folium.Popup(popuptext(),parse_html=True),
fill_color="green", fill=True, color = 'grey', fill_opacity=0.7))
world_map.add_child(fg_k)
world_map.add_child(fg_e)
world_map.add_child(fg_v)
world_map.add_child(folium.LayerControl())
world_map.save("gazdfoci.html")
| null |
gazdfoci.py
|
gazdfoci.py
|
py
| 2,474 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_excel",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "folium.Map",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "folium.FeatureGroup",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "folium.CircleMarker",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "folium.Popup",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "folium.FeatureGroup",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "folium.CircleMarker",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "folium.Popup",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "folium.FeatureGroup",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "folium.CircleMarker",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "folium.Popup",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "folium.LayerControl",
"line_number": 57,
"usage_type": "call"
}
] |
227478972
|
from django.shortcuts import render,redirect, get_object_or_404
from django.http import HttpResponse
from django.db.models import Q
from .models import Juridica
from .models import Ciudad
from .models import Sector
from .models import TipoEmpresa
from ..personas_naturales.models import Persona_Natural
from . import forms
from dal import autocomplete
from django.core.paginator import Paginator
class EmpresaAutocomplete(autocomplete.Select2QuerySetView):
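"""Select2 autocomplete over Juridica records, matched by name substring or RUC prefix."""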
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_queryset(self):
qs = Juridica.objects.all().order_by("nombre")
if self.q:
qs = qs.filter(Q(nombre__icontains=self.q) | Q(ruc__istartswith=self.q))
#qs = qs.filter(nombre__istartswith=self.q)
return qs
def has_add_permission(self, request):
return True
class TipoAutocomplete(autocomplete.Select2QuerySetView):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_queryset(self):
qs = TipoEmpresa.objects.all().order_by("nombre")
if self.q:
qs = qs.filter(nombre__istartswith=self.q)
return qs
def has_add_permission(self, request):
return True
class SectorAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = Sector.objects.all().order_by("nombre")
if self.q:
qs = qs.filter(nombre__istartswith=self.q)
return qs
def has_add_permission(self, request):
return True
# Create your views here.
def index_juridicas(request):
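"""List Juridica records with filtering and 30-per-page pagination."""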
juridicas_list = Juridica.objects.all().order_by("pk")
juridica_filter = forms.JuridicaFilter(request.GET, queryset=juridicas_list)
paginator = Paginator(juridica_filter.qs, 30)
page = request.GET.get('page')
juridicas = paginator.get_page(page)
return render(request, 'personas_juridicas/index.html', {'juridicas': juridicas, "filter": juridica_filter})
def load_ciudades(request):
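"""Return the city dropdown options for the selected province (AJAX dependent dropdown)."""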
provincia_id = request.GET.get("provincia")
ciudades = Ciudad.objects.filter(provincia_id=provincia_id).order_by('nombre')
return render(request,"personas_juridicas/dropdown_ciudades.html",{"ciudades":ciudades})
def juridicas_view(request):
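"""Create a Juridica from the posted form, then redirect to its edit page keyed by RUC."""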
# def post(self, request, *args, **kwargs):
# self.object =self.get_object
# form=self.form_class(request.POST)
# if form.is_valid():
# try:
# pre = str(int(self.model.objects.latest('pk').pk+1))
# sec = '0'*(4-len(pre))+pre
# except self.model.DoesNotExist:
# sec = '0001'
# form.instance.cod_reporte = 'RC-CEC-'+sec+'-'+str(date.today().year)
# reporte=form.save()
# return HttpResponseRedirect(self.get_success_url()+'/'+str(reporte.pk))
# else:
# return self.render_to_response(self.get_context_data(form=form))
#href="{% url 'editar_juridica' pk=j.pk %}"
if(request.method == "POST"):
form = forms.JuridicaForm(request.POST)
if(form.is_valid()):
form.save()
#return HttpResponseRedirect(get_success_url()+'/'+str(reporte.pk))
ruc = form.cleaned_data["ruc"]
print(form.is_valid())
print(ruc)
return redirect("editar_juridica",pk = ruc)
else:
print(form.errors)
print("No entre a validar")
else:
form = forms.JuridicaForm()
print("Soy genial")
return render(request,"personas_juridicas/forma.html", {"form":form})
def juridicas_editar(request,pk):
n = Persona_Natural.objects.all()
if(request.method == "POST"):
p = get_object_or_404(Juridica, pk=pk)
form = forms.JuridicaForm(request.POST,instance=p)
if(form.is_valid()):
form.save()
return redirect("index_juridicas")
else:
p = get_object_or_404(Juridica, pk=pk)
form = forms.JuridicaForm(instance=p)
#form.fields["fecha"].value=None
print(n)
return render(request, 'personas_juridicas/editar_forma.html', {'form': form,'naturales':n})
def juridicas_eliminar(request,pk=None):
if(request.method == "POST"):
p = get_object_or_404(Juridica,pk=pk)
p.delete()
return redirect("index_juridicas")
else:
pk= request.GET.get('pk')
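# RUC identifiers are 13 digits; restore a leading zero that may have been stripped.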
if len(pk)<13:
pk="0"+str(pk)
p = get_object_or_404(Juridica,pk=pk)
return render(request, 'personas_juridicas/eliminar.html', {'object': p})
| null |
ventas/personas_juridicas/views.py
|
views.py
|
py
| 4,208 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dal.autocomplete.Select2QuerySetView",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "dal.autocomplete",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.Juridica.objects.all",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models.Juridica.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "models.Juridica",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dal.autocomplete.Select2QuerySetView",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "dal.autocomplete",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "models.TipoEmpresa.objects.all",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.TipoEmpresa.objects",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "models.TipoEmpresa",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "dal.autocomplete.Select2QuerySetView",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "dal.autocomplete",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "models.Sector.objects.all",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.Sector.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "models.Sector",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "models.Juridica.objects.all",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models.Juridica.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "models.Juridica",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.Ciudad.objects.filter",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "models.Ciudad.objects",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "models.Ciudad",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "personas_naturales.models.Persona_Natural.objects.all",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "personas_naturales.models.Persona_Natural.objects",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "personas_naturales.models.Persona_Natural",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "models.Juridica",
"line_number": 113,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "models.Juridica",
"line_number": 120,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "models.Juridica",
"line_number": 129,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "models.Juridica",
"line_number": 136,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 137,
"usage_type": "call"
}
] |
240327679
|
import pygame as pg
import pygame.freetype
import pygame.gfxdraw
from setting import *
from Background import *
from BKLOG import *
from Scenes import *
import typing
TOPLEFT=1
TOPRIGHT=2
BOTTOMLEFT=3
BOTTOMRIGHT=4
LEFT=5
RIGHT=6
TOP=7
BOTTOM=8
BODY=10
EDIT_MODE=True
#EDIT_MODE=False
class Label(pg.sprite.DirtySprite):
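"""Resizable text sprite rendered with pygame.freetype."""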
def __init__(self, text, pos):
DEBUG("<< Enter")
super().__init__()
self.text = text
self.font_name = "malgungothic"
self.font_size = 30
self.color = pg.Color("White")
self.font = pygame.freetype.SysFont(self.font_name, self.font_size)
( self.text_image, self.text_rect ) = self.font.render(self.text, self.color)
INFO(f"self.text_image = [{self.text_image}]")
INFO(f"type of self.text_image = [{type(self.text_image)}]")
self.image = pg.Surface([self.text_rect.width, self.text_rect.height], pg.SRCALPHA, 32)
self.image = self.image.convert_alpha()
self.image.blit(self.text_image, (0, 0))
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = pos
self.rel_width = self.rect.width
self.rel_height = self.rect.height
DEBUG(" Exit>>")
def change_font(self, font_name):
self.font_name = font_name
def change_text(self, text):
"""Change the text whe you click"""
self.text = text
self.font = pygame.freetype.SysFont(self.font_name, self.font_size)
( self.text_image, self.text_rect ) = self.font.render(self.text, self.color)
self.image = pg.transform.smoothscale(self.text_image, (self.rel_width, self.rel_height))
self.rect = self.image.get_rect(topleft = (self.rect.x, self.rect.y))
def change_size(self, x, y, width, height):
DEBUG("<< Enter")
self.rel_x = x
self.rel_y = y
if width < 1: width = 0
if height < 1: height = 0
self.rel_width = width
self.rel_height = height
self.image = pg.transform.smoothscale(self.text_image, (width, height))
self.rect = self.image.get_rect(topleft = (x, y))
self.rect.x = x
self.rect.y = y
self.rect.width = width
self.rect.height = height
DEBUG(" Exit>>")
def board_input(self):
DEBUG("<< Enter")
keys = pg.key.get_pressed()
if self.rect.collidepoint(pg.mouse.get_pos()):
if pg.mouse.get_pressed() == (1, 0, 0):
INFO("Mouse Button pressed!! ")
self.change_text("마우스 눌렀어.")
if keys[pg.K_SPACE]:
INFO("Space key pressed!! ")
DEBUG(" Exit>>")
def apply_shadow(self):
DEBUG("<< Enter")
DEBUG(" Exit>>")
def board_animation(self):
DEBUG("<< Enter")
DEBUG(" Exit>>")
def update(self):
DEBUG("<< Enter")
self.dirty = 1
self.board_input()
self.apply_shadow()
self.board_animation()
DEBUG(" Exit>>")
class Button(pg.sprite.DirtySprite):
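"""Rounded-rectangle button that swaps its text for a feedback string when clicked."""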
def __init__(self, text, pos, bg="black", feedback=""):
DEBUG("<< Enter")
super().__init__()
self.text = text
self.font_name = "malgungothic"
self.font_size = 30
self.color = pg.Color("White")
self.x, self.y = pos
self.font = pygame.freetype.SysFont(self.font_name, self.font_size)
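# Render one wide reference glyph to estimate per-character metrics for the button padding.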
( tmp_image, tmp_rect ) = self.font.render("밝", self.color)
one_width = int(tmp_rect.width * 1.36)
one_height = int(tmp_rect.height * 1.83)
self.margin_width = one_width
self.margin_height = (one_height -tmp_rect.height) // 2
( self.text_image, self.text_rect ) = self.font.render(self.text, self.color)
INFO(f"self.text_image = [{self.text_image}]")
INFO(f"type of self.text_image = [{type(self.text_image)}]")
self.image = pg.Surface([self.text_rect.width + (self.margin_width*2), self.text_rect.height + (self.margin_height*2)], pg.SRCALPHA, 32)
self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = pos
self.draw_rounded_rect(self.image, pg.Rect(0, 0, self.rect.width, self.rect.height), RED, 10)
#self.image.fill(bg)
self.image.blit(self.text_image, (self.margin_width, self.margin_height))
self.rel_width = self.rect.width
self.rel_height = self.rect.height
if feedback == "":
self.feedback = "text"
else:
self.feedback = feedback
#self.change_text(text, bg)
DEBUG(" Exit>>")
def change_text(self, text, bg="black"):
DEBUG("<< Enter")
"""Change the text when you click"""
self.text = text
self.font = pygame.freetype.SysFont(self.font_name, self.font_size)
( self.text_image, self.text_rect ) = self.font.render(self.text, self.color)
self.image = pg.Surface([self.rect.width, self.rect.height], pg.SRCALPHA, 32)
self.image = self.image.convert_alpha()
self.image.fill(bg)
self.image.blit(self.text_image, (0, 0))
self.rect = self.image.get_rect()
DEBUG(" Exit>>")
def draw_rounded_rect(self, surface, rect, color, corner_radius):
''' Draw a rectangle with rounded corners.
Would prefer this:
pygame.draw.rect(surface, color, rect, border_radius=corner_radius)
but this option is not yet supported in my version of pygame so do it ourselves.
We use anti-aliased circles to make the corners smoother
'''
if rect.width < 2 * corner_radius or rect.height < 2 * corner_radius:
raise ValueError(f"Both height (rect.height) and width (rect.width) must be > 2 * corner radius ({corner_radius})")
# need to use anti aliasing circle drawing routines to smooth the corners
pygame.gfxdraw.aacircle(surface, rect.left+corner_radius, rect.top+corner_radius, corner_radius, color)
pygame.gfxdraw.aacircle(surface, rect.right-corner_radius-1, rect.top+corner_radius, corner_radius, color)
pygame.gfxdraw.aacircle(surface, rect.left+corner_radius, rect.bottom-corner_radius-1, corner_radius, color)
pygame.gfxdraw.aacircle(surface, rect.right-corner_radius-1, rect.bottom-corner_radius-1, corner_radius, color)
pygame.gfxdraw.filled_circle(surface, rect.left+corner_radius, rect.top+corner_radius, corner_radius, color)
pygame.gfxdraw.filled_circle(surface, rect.right-corner_radius-1, rect.top+corner_radius, corner_radius, color)
pygame.gfxdraw.filled_circle(surface, rect.left+corner_radius, rect.bottom-corner_radius-1, corner_radius, color)
pygame.gfxdraw.filled_circle(surface, rect.right-corner_radius-1, rect.bottom-corner_radius-1, corner_radius, color)
rect_tmp = pygame.Rect(rect)
rect_tmp.width -= 2 * corner_radius
rect_tmp.center = rect.center
pygame.draw.rect(surface, color, rect_tmp)
rect_tmp.width = rect.width
rect_tmp.height -= 2 * corner_radius
rect_tmp.center = rect.center
pygame.draw.rect(surface, color, rect_tmp)
def draw_bordered_rounded_rect(self, surface, rect, color, border_color, corner_radius, border_thickness):
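"""Draw a rounded rectangle with a border: the border as an outer rounded rect, the fill inset by border_thickness."""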
if corner_radius < 0:
raise ValueError(f"border radius ({corner_radius}) must be >= 0")
rect_tmp = pygame.Rect(rect)
center = rect_tmp.center
if border_thickness:
if corner_radius <= 0:
pygame.draw.rect(surface, border_color, rect_tmp)
else:
self.draw_rounded_rect(surface, rect_tmp, border_color, corner_radius)
rect_tmp.inflate_ip(-2*border_thickness, -2*border_thickness)
inner_radius = corner_radius - border_thickness + 1
else:
inner_radius = corner_radius
if inner_radius <= 0:
pygame.draw.rect(surface, color, rect_tmp)
else:
self.draw_rounded_rect(surface, rect_tmp, color, inner_radius)
def show(self):
DEBUG("<< Enter")
#screen.blit(button1.surface, (self.x, self.y))
DEBUG(" Exit>>")
def click(self, event):
DEBUG("<< Enter")
x, y = pg.mouse.get_pos()
if event.type == pg.MOUSEBUTTONDOWN:
if pg.mouse.get_pressed()[0]:
if self.rect.collidepoint(x, y):
self.change_text(self.feedback, bg="red")
DEBUG(" Exit>>")
class Board(pg.sprite.DirtySprite):
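"""Plain rectangular panel sprite that can be rescaled and polls mouse/keyboard input."""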
def __init__(self, x, y, width, height, bg=GRAY):
DEBUG("<< Enter")
super().__init__()
#board_img = pg.image.load('').convert_alpha()
#board_img2 = pg.image.load('').convert_alpha()
#self.font = pg.font.SysFont("Arial", 20)
self.font = pg.font.SysFont("굴림", 20)
self.bg = bg
self.board_img = pg.Surface((width, height))
self.board_img2 = pg.Surface((width, height))
self.board_img.fill(self.bg)
self.board_img2.fill(WHITE)
self.board_imgs = [self.board_img, self.board_img2]
self.board_index = 0
#self.text = ""
#self.over_img = pg.image.load('').convert_alpha()
self.image = self.board_imgs[self.board_index]
self.rect = self.image.get_rect(topleft = (x, y))
self.shadow = 2
#self.over_sound = pg.mixer.Sound('audio/boad_over.mp3')
#self.over_sound.set_volume(0.5)
DEBUG(" Exit>>")
def change_text(self, text, bg="black"):
"""Change the text whe you click"""
self.text = self.font.render(text, 1, pg.Color("White"))
self.size = self.text.get_size()
self.surface = pg.Surface(self.size)
#self.surface.fill(bg)
self.image.blit(self.text, (0, 0))
#self.rect = pg.Rect(self.x, self.y, self.size[0], self.size[1])
def change_size(self, x, y, width, height):
DEBUG("<< Enter")
#self.image = pg.transform.smoothscale(self.image, (width, height))
if width < 1: width = 0
if height < 1: height = 0
self.image = pg.transform.smoothscale(self.board_img, (width, height))
self.rect = self.image.get_rect(topleft = (x, y))
DEBUG(" Exit>>")
def board_input(self):
DEBUG("<< Enter")
keys = pg.key.get_pressed()
if self.rect.collidepoint(pg.mouse.get_pos()):
if pg.mouse.get_pressed() == (1, 0, 0):
INFO("Mouse Button pressed!! ")
if keys[pg.K_SPACE]:
INFO("Space key pressed!! ")
DEBUG(" Exit>>")
def apply_shadow(self):
DEBUG("<< Enter")
DEBUG(" Exit>>")
def board_animation(self):
DEBUG("<< Enter")
DEBUG(" Exit>>")
def update(self):
DEBUG("<< Enter")
self.dirty = 1
self.board_input()
self.apply_shadow()
self.board_animation()
DEBUG(" Exit>>")
class MenuScene(Scene):
def __init__(self, name, gamepad):
DEBUG("<< Enter")
Scene.__init__(self, name)
self.name = name
self.gamepad = gamepad
self.nextScene = ""
        self.rectangle_draging = None
        self.linedrag_mode = None
        self.drag_object = None  # sprite currently being dragged/resized in edit mode
        # Load background
#self.bg = SlideLeftBackground()
        # Initialize clock
self.clock = pg.time.Clock()
DEBUG(" Exit>>")
def start(self):
DEBUG("<< Enter")
DEBUG(">>>>>>>>>>>>>>>> [%s] Scene START >>>>>>>>>>>>>>>>"%(self.name))
        # Load game objects
self.board = Board(219, 41, 585, 407, DARKOLIVEGREEN)
#self.font_board = Board( 722, 2, 300, 500)
        self.menu_title = Label("메뉴를 선택해 주세요.", (348, 63))  # "Please select a menu."
        self.exit_button = Button("종 료", (447, 348), WHITE)  # "Exit"
self.play_button = Button("PLAY", (447, 227), WHITE)
        # Group separation
        ## add everything to the master group
self.allObjGroup = pg.sprite.Group()
self.allObjGroup.add(self.board)
#self.allObjGroup.add(self.font_board)
self.allObjGroup.add(self.menu_title)
self.allObjGroup.add(self.exit_button)
self.allObjGroup.add(self.play_button)
for font in pg.font.get_fonts():
INFO(f"font[{font}]")
#self.font_board.font = font
#self.font_board.change_text(f"[{font}]ABCabc가나다")
#self.menu = pg.sprite.Group()
        # Load background
self.bg = MenuBackground()
        # Enter event loop
self.nextScene = self.event_loop()
DEBUG(" Exit>>")
return self.nextScene
def event_loop(self):
        '''
        IF event.type == pygame.QUIT
            break
        IF event.type == pygame.KEYDOWN
            pass keyboard control
        clear background
        draw background
        draw objects
        update display # pygame.display.update()
        regulate refresh rate # clock.tick(60)
        pygame.quit()
        quit() # exit
        '''
DEBUG("<< Enter")
OnGoing = True
while OnGoing:
for event in pg.event.get():
if event.type == pg.QUIT:
OnGoing = False
if not EDIT_MODE:
INFO("RUNNING Mode")
else:
if event.type == pg.MOUSEBUTTONDOWN:
INFO("MOUSEBOTTONDOWN")
if event.button == 1:
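                        # Hit-test sprites top-most first; a 10x10 rect around the mouse
                        # picks a corner/edge resize mode, otherwise the body is dragged.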
sprites = self.allObjGroup.sprites()
for sprite in reversed(sprites):
INFO("sprite founded!!")
INFO("sprite.rect = [{}]".format(sprite.rect))
INFO("eventpos = [{}]".format(event.pos))
if sprite.rect.collidepoint(event.pos):
INFO("collision!!")
self.drag_object = sprite
mouse_x, mouse_y = event.pos
mouse_rect = pg.Rect(mouse_x - 5, mouse_y - 5, 10, 10)
# topleft check
if mouse_rect.collidepoint((sprite.rect.x, sprite.rect.y)):
self.linedrag_mode = TOPLEFT
offset_x = sprite.rect.x - mouse_x
offset_y = sprite.rect.y - mouse_y
elif mouse_rect.collidepoint((sprite.rect.x + sprite.rect.width, sprite.rect.y)):
self.linedrag_mode = TOPRIGHT
offset_x = sprite.rect.x + sprite.rect.width - mouse_x
offset_y = sprite.rect.y - mouse_y
elif mouse_rect.collidepoint((sprite.rect.x, sprite.rect.y + sprite.rect.height)):
self.linedrag_mode = BOTTOMLEFT
offset_x = sprite.rect.x - mouse_x
offset_y = sprite.rect.y + sprite.rect.height - mouse_y
elif mouse_rect.collidepoint((sprite.rect.x + sprite.rect.width, sprite.rect.y + sprite.rect.height)):
self.linedrag_mode = BOTTOMRIGHT
offset_x = sprite.rect.x + sprite.rect.width - mouse_x
offset_y = sprite.rect.y + sprite.rect.height - mouse_y
elif (sprite.rect.x) < mouse_x < (sprite.rect.x + 5):
self.linedrag_mode = LEFT
elif (sprite.rect.x + sprite.rect.width - 5) < mouse_x < (sprite.rect.x + sprite.rect.width):
self.linedrag_mode = RIGHT
elif (sprite.rect.y) < mouse_y < (sprite.rect.y + 5):
self.linedrag_mode = TOP
elif (sprite.rect.y + sprite.rect.height - 5) < mouse_y < (sprite.rect.y + sprite.rect.height):
self.linedrag_mode = BOTTOM
else:
self.linedrag_mode = BODY
offset_x = sprite.rect.x - mouse_x
offset_y = sprite.rect.y - mouse_y
#self.rectangle_draging = True
break
elif event.type == pg.MOUSEBUTTONUP:
if event.button == 1:
#self.rectangle_draging = False
if self.drag_object != None:
INFO(f"self.drag_object.rect.pos=[ {self.drag_object.rect.x}, {self.drag_object.rect.y} ]")
INFO(f"self.drag_object.rect.size=[ {self.drag_object.rect.width}, {self.drag_object.rect.height} ]")
self.drag_object = None
self.linedrag_mode = None
elif event.type == pg.MOUSEMOTION:
INFO("self.linedrag_mode = [{}]".format(self.linedrag_mode))
mouse_x, mouse_y = event.pos
if self.linedrag_mode == None:
                        # On mouse-over, change the cursor shape if the pointer is where a line drag could start.
sprites = self.allObjGroup.sprites()
for sprite in reversed(sprites):
if sprite.rect.collidepoint(event.pos):
INFO(f"Motion Collisiton detected!!")
#self.drag_object = sprite
mouse_x, mouse_y = event.pos
mouse_rect = pg.Rect(mouse_x - 5, mouse_y - 5, 10, 10)
# topleft check
if mouse_rect.collidepoint((sprite.rect.x, sprite.rect.y)):
INFO(f"Drag Mouse Cursor = 11h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZENWSE)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZENWSE)
elif mouse_rect.collidepoint((sprite.rect.x + sprite.rect.width, sprite.rect.y)):
INFO(f"Drag Mouse Cursor = 1h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZENESW)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZENESW)
elif mouse_rect.collidepoint((sprite.rect.x, sprite.rect.y + sprite.rect.height)):
INFO(f"Drag Mouse Cursor = 7h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZENESW)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZENESW)
elif mouse_rect.collidepoint((sprite.rect.x + sprite.rect.width, sprite.rect.y + sprite.rect.height)):
INFO(f"Drag Mouse Cursor = 5h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZENWSE)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZENWSE)
elif (sprite.rect.x) < mouse_x < (sprite.rect.x + 5):
INFO(f"Drag Mouse Cursor = 9h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZEWE)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZEWE)
elif (sprite.rect.x + sprite.rect.width - 5) < mouse_x < (sprite.rect.x + sprite.rect.width):
INFO(f"Drag Mouse Cursor = 3h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZEWE)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZEWE)
elif (sprite.rect.y) < mouse_y < (sprite.rect.y + 5):
INFO(f"Drag Mouse Cursor = 12h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZENS)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZENS)
elif (sprite.rect.y + sprite.rect.height - 5) < mouse_y < (sprite.rect.y + sprite.rect.height):
INFO(f"Drag Mouse Cursor = 6h")
#pg.mouse.set_system_cursor(pg.SYSTEM_CURSOR_SIZENS)
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_SIZENS)
else:
#expected_cursor = pygame.cursors.Cursor(size, hotspot, xormask, andmask)
#pygame.mouse.set_cursor(expected_cursor)
# body grab
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_ARROW)
break
else:
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_ARROW)
else:
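                    # A drag is in progress: resize or move the grabbed sprite
                    # according to the current drag mode.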
if self.linedrag_mode == TOPLEFT:
DEBUG("TOPLEFT")
offset_width = self.drag_object.rect.x - mouse_x
offset_height = self.drag_object.rect.y - mouse_y
DEBUG(f"[1]mouse_x = [{mouse_x}]")
DEBUG(f"[1]mouse_y = [{mouse_y}]")
DEBUG(f"[1]self.drag_object.rect.x = [{self.drag_object.rect.x}]")
DEBUG(f"[1]self.drag_object.rect.y = [{self.drag_object.rect.y}]")
DEBUG(f"[1]self.drag_object.rect.width = [{self.drag_object.rect.width}]")
DEBUG(f"[1]self.drag_object.rect.height = [{self.drag_object.rect.height}]")
DEBUG(f"[1]offset_width = [{offset_width}]")
DEBUG(f"[1]offset_height = [{offset_height}]")
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
self.drag_object.rect.y = mouse_y
self.drag_object.rect.x = mouse_x
elif self.linedrag_mode == TOPRIGHT:
DEBUG("TOPRIGHT")
offset_width = mouse_x - (self.drag_object.rect.x + self.drag_object.rect.width)
offset_height = self.drag_object.rect.y - mouse_y
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
self.drag_object.rect.y = mouse_y
elif self.linedrag_mode == BOTTOMLEFT:
DEBUG("BOTTOMLEFT")
offset_width = self.drag_object.rect.x - mouse_x
offset_height = mouse_y - (self.drag_object.rect.y + self.drag_object.rect.height)
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
self.drag_object.rect.x = mouse_x
elif self.linedrag_mode == BOTTOMRIGHT:
DEBUG("BOTTOMRIGHT")
offset_width = mouse_x - (self.drag_object.rect.x + self.drag_object.rect.width )
offset_height = mouse_y - (self.drag_object.rect.y + self.drag_object.rect.height)
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width, # mouse_x - self.drag_object.rect.x
self.drag_object.rect.height + offset_height # mouse_y - self.drag_object.rect.y
)
elif self.linedrag_mode == TOP:
DEBUG("TOP")
offset_width = 0
offset_height = self.drag_object.rect.y - mouse_y
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
self.drag_object.rect.y = mouse_y
elif self.linedrag_mode == BOTTOM:
DEBUG("BOTTOM")
offset_width = 0
offset_height = mouse_y - (self.drag_object.rect.y + self.drag_object.rect.height)
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
elif self.linedrag_mode == LEFT:
DEBUG("LEFT")
offset_width = self.drag_object.rect.x - mouse_x
offset_height = 0
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
self.drag_object.rect.x = mouse_x
elif self.linedrag_mode == RIGHT:
DEBUG("RIGHT")
INFO(f"mouse_x = [{mouse_x}] , self.drag_object.rect.x = [{self.drag_object.rect.x}]")
offset_width = mouse_x - (self.drag_object.rect.x + self.drag_object.rect.width)
offset_height = 0
self.drag_object.change_size(
self.drag_object.rect.x,
self.drag_object.rect.y,
self.drag_object.rect.width + offset_width,
self.drag_object.rect.height + offset_height
)
elif self.linedrag_mode == BODY:
INFO("BODY_DRAG")
self.drag_object.rect.x = mouse_x + offset_x
self.drag_object.rect.y = mouse_y + offset_y
#if event.type == pg.KEYDOWN:
# DEBUG("event.type=[%d], event.key=[%d]"%(event.type, event.key))
            # pass keyboard control
# if event.key == pg.K_SPACE:
# OnGoing = False
#if event.type == pg.MOUSEBUTTONDOWN:
# INFO("event.type=[%d], event.key=[%d]"%(event.type, event.key))
            # Clear background
self.gamepad.fill(WHITE)
            # Draw background
self.bg.update(self.gamepad)
            # Draw objects
self.allObjGroup.draw(self.gamepad)
self.allObjGroup.update()
            # Update display
pg.display.update()
            # Regulate refresh rate
self.clock.tick(REFRESH_RATE)
DEBUG("**********************goto next Scene = [%s]"%self.nextScene)
DEBUG(" Exit>>")
return self.nextScene
if __name__ == '__main__':
pg.init()
gamepad = pg.display.set_mode(GAME_SCREEN)
menuScene = MenuScene("menu", gamepad)
menuScene.start()
pg.quit()
| null |
MenuScene 20210730.py
|
MenuScene 20210730.py
|
py
| 29,966 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.sprite",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.freetype.SysFont",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.freetype",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.freetype.SysFont",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pygame.freetype",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.smoothscale",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.smoothscale",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pygame.freetype.SysFont",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pygame.freetype",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pygame.freetype.SysFont",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pygame.freetype",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.aacircle",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.aacircle",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.aacircle",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.aacircle",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.filled_circle",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.filled_circle",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.filled_circle",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "pygame.gfxdraw.filled_circle",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "pygame.transform.smoothscale",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_fonts",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "pygame.MOUSEBUTTONUP",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEMOTION",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZENWSE",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZENESW",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZENESW",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZENWSE",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZEWE",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 472,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZEWE",
"line_number": 472,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZENS",
"line_number": 476,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 480,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_SIZENS",
"line_number": 480,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 485,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_ARROW",
"line_number": 485,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_cursor",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 488,
"usage_type": "attribute"
},
{
"api_name": "pygame.SYSTEM_CURSOR_ARROW",
"line_number": 488,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 620,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 630,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 630,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 633,
"usage_type": "call"
}
] |
577929564
|
import pytest
from api.clusters.serializers import ClusterSerializer
from api.nodes.serializers import ClusterNodeDetailSerializer, ClusterNodeSerializer, GPUSerializer
from db.models.clusters import Cluster
from db.models.nodes import ClusterNode, NodeGPU
from factories.factory_clusters import ClusterNodeFactory, GPUFactory
from tests.utils import BaseTest
@pytest.mark.clusters_mark
class TestGPUSerializer(BaseTest):
serializer_class = GPUSerializer
model_class = NodeGPU
factory_class = GPUFactory
expected_keys = {'uuid', 'cluster_node', 'serial', 'name', 'index', 'memory', 'updated_at',
'created_at', }
def setUp(self):
super().setUp()
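        # Attach two GPUs to a single node of the loaded cluster.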
node = ClusterNodeFactory(cluster=Cluster.load())
self.obj1 = self.factory_class(cluster_node=node)
self.obj2 = self.factory_class(cluster_node=node)
def test_serialize_one(self):
data = self.serializer_class(self.obj1).data
assert set(data.keys()) == self.expected_keys
assert data.pop('uuid') == self.obj1.uuid.hex
assert data.pop('cluster_node') == self.obj1.cluster_node.uuid.hex
data.pop('created_at')
data.pop('updated_at')
for k, v in data.items():
assert getattr(self.obj1, k) == v
def test_serialize_many(self):
data = self.serializer_class(self.model_class.objects.all(), many=True).data
assert len(data) == 2
for d in data:
assert set(d.keys()) == self.expected_keys
@pytest.mark.clusters_mark
class TestClusterNodeSerializer(BaseTest):
serializer_class = ClusterNodeSerializer
model_class = ClusterNode
factory_class = ClusterNodeFactory
expected_keys = {'uuid', 'sequence', 'name', 'hostname', 'role', 'memory', 'cpu', 'n_gpus', }
def setUp(self):
super().setUp()
self.obj1 = self.factory_class(cluster=Cluster.load())
self.obj2 = self.factory_class(cluster=Cluster.load())
def test_serialize_one(self):
data = self.serializer_class(self.obj1).data
assert set(data.keys()) == self.expected_keys
assert data.pop('uuid') == self.obj1.uuid.hex
for k, v in data.items():
assert getattr(self.obj1, k) == v
def test_serialize_many(self):
data = self.serializer_class(self.model_class.objects.all(), many=True).data
assert len(data) == 2
for d in data:
assert set(d.keys()) == self.expected_keys
@pytest.mark.clusters_mark
class TestClusterNodeDetailsSerializer(BaseTest):
serializer_class = ClusterNodeDetailSerializer
model_class = ClusterNode
expected_keys = {'uuid', 'name', 'hostname', 'role', 'docker_version',
'kubelet_version', 'os_image', 'kernel_version',
'schedulable_taints', 'schedulable_state', 'is_current',
'memory', 'cpu', 'n_gpus', 'status', 'gpus', 'sequence'}
def setUp(self):
super().setUp()
self.cluster = Cluster.load()
self.obj1 = ClusterNodeFactory(cluster=self.cluster)
self.obj2 = ClusterNodeFactory(cluster=self.cluster)
self.gpu_obj1 = GPUFactory(cluster_node=self.obj1)
self.gpu_obj2 = GPUFactory(cluster_node=self.obj2)
def test_serialize_one(self):
data = self.serializer_class(self.obj1).data
assert set(data.keys()) == self.expected_keys
assert data.pop('uuid') == self.obj1.uuid.hex
assert len(data.pop('gpus')) == 1
for k, v in data.items():
assert getattr(self.obj1, k) == v
def test_serialize_many(self):
data = self.serializer_class(self.model_class.objects.all(), many=True).data
assert len(data) == 2
for d in data:
assert set(d.keys()) == self.expected_keys
@pytest.mark.clusters_mark
class TestClusterDetailSerializer(BaseTest):
serializer_class = ClusterSerializer
model_class = Cluster
expected_keys = {'uuid', 'version_api', 'created_at', 'updated_at', 'nodes', }
def setUp(self):
super().setUp()
self.cluster = Cluster.load()
ClusterNodeFactory(cluster=self.cluster)
ClusterNodeFactory(cluster=self.cluster)
def test_serialize_one(self):
data = self.serializer_class(self.cluster).data
assert set(data.keys()) == self.expected_keys
assert len(data.pop('nodes')) == 2
assert data.pop('uuid') == self.cluster.uuid.hex
data.pop('created_at')
data.pop('updated_at')
for k, v in data.items():
assert getattr(self.cluster, k) == v
| null |
tests/test_clusters/test_serializers.py
|
test_serializers.py
|
py
| 4,611 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tests.utils.BaseTest",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "api.nodes.serializers.GPUSerializer",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "db.models.nodes.NodeGPU",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "factories.factory_clusters.GPUFactory",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "factories.factory_clusters.ClusterNodeFactory",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "db.models.clusters.Cluster.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "db.models.clusters.Cluster",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tests.utils.BaseTest",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "api.nodes.serializers.ClusterNodeSerializer",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "db.models.nodes.ClusterNode",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "factories.factory_clusters.ClusterNodeFactory",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "db.models.clusters.Cluster.load",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "db.models.clusters.Cluster",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "db.models.clusters.Cluster.load",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "db.models.clusters.Cluster",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "tests.utils.BaseTest",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "api.nodes.serializers.ClusterNodeDetailSerializer",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "db.models.nodes.ClusterNode",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "db.models.clusters.Cluster.load",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "db.models.clusters.Cluster",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "factories.factory_clusters.ClusterNodeFactory",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "factories.factory_clusters.ClusterNodeFactory",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "factories.factory_clusters.GPUFactory",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "factories.factory_clusters.GPUFactory",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "tests.utils.BaseTest",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "api.clusters.serializers.ClusterSerializer",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "db.models.clusters.Cluster",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "db.models.clusters.Cluster.load",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "db.models.clusters.Cluster",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "factories.factory_clusters.ClusterNodeFactory",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "factories.factory_clusters.ClusterNodeFactory",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 106,
"usage_type": "attribute"
}
] |
384243273
|
# coding: utf-8
import requests
import configparser
import os
config = configparser.ConfigParser()
basedir = os.path.abspath(os.path.dirname(__file__))
static_file_path = os.path.join(basedir, 'config.ini')
config.read(static_file_path)
cookieUrl = config.get('url', 'cookieUrl')
data = config.get('data', 'data')
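# POST the configured payload once and reuse the returned session cookies.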
r = requests.post(url=cookieUrl, data=data)
cookie = r.cookies
| null |
test2/Cookie/Cookies.py
|
Cookies.py
|
py
| 429 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "configparser.ConfigParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
}
] |
287990213
|
from bs4 import BeautifulSoup
import pandas as pd
import re
import unidecode
import time
path = []
for i in range(1, 51):
path.append('text_data/' + str(i) + '.txt')
for i in range(0, 50):
crawl_path = 'crawl_data/text_data' + str(i) + '.csv'
html = open(path[i], 'r', encoding='utf-8')
htmlhandle = html.read()
soup = BeautifulSoup(htmlhandle, 'lxml')
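    # Parse the saved page and collect the per-report fields into parallel lists.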
result = pd.DataFrame()
result.drop(result.index, inplace=True)
view = []
inform = []
reported_id = []
reported_address = []
reported_gender = []
reported_credit = []
post_time = []
cn_links = []
    ## view count
li = soup.find_all(text=re.compile("访问次数"))
pattern = re.compile(r'\d+')
for item in li:
string = unidecode.unidecode(item.string)
view.append(re.findall(pattern, string)[0])
    ## number of reports
list_report = soup.select('input[node-type="uids_num"] ')
for item in list_report:
inform.append(item['value'])
for item in soup.find_all("div", class_="user bg_orange2 clearfix"):
        ## reported user's id
link_u = item.select('p a')
reported_id.append(str(link_u)[29:39])
        ## reported user's location
string = str(item.contents[5])[-15:]
add = re.sub("[A-Za-z0-9\<\>\"\/\\t]", "", string)
reported_address.append(add)
        ## reported user's credit level
x = item.select('p a[target="_blank"] ')
credit_u_i = re.findall(r'信用等级:(.+)"', str(x))
reported_credit.append(credit_u_i)
    ## post time of the original weibo
'''for item in soup.find_all(text=re.compile("发布时间")):
match=re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', item)
if match==None:
post_time.append('nn')
else:
post_time.append(time.strptime(match.group(), '%Y-%m-%d %H:%M:%S'))'''
for item in soup.find_all('div', {'class': 'item top'}):
if item.p.a == None:
post_time.append('None')
else:
match = re.findall(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', str(item.p))
post_time.append(match[0])
# post_time.append(time.strptime(match.group(), '%Y-%m-%d %H:%M:%S'))
    ## link to the original weibo post
for item in soup.select('div[class="W_main_half_r"] div div div div[class="item top"]'):
if item.p.a == None:
cn_links.append('nn')
else:
weibo_cn = re.findall(r'(?<=\/)[\w]*(?=\"\s)', str(item.p.a))
cn_links.append('https://weibo.cn/comment/' + weibo_cn[0])
result['view_num'] = view
result['inform_num'] = inform
result['reported_id'] = reported_id
'''result['reported_gender'] = reported_gender'''
result['reported_address'] = reported_address
result['post_time'] = post_time
result['link_to_post'] = cn_links
result['reported_credit'] = reported_credit
print(result)
result.to_csv(crawl_path, index=None, header=True)
| null |
spider/web_comments.py
|
web_comments.py
|
py
| 2,950 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "unidecode.unidecode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 73,
"usage_type": "call"
}
] |
338121494
|
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.utils import trailing_slash
from facilities.models import Hospital, GPSurgery
from django.core import serializers
from django.http import HttpResponse
from django.db.models import Q
from django.conf.urls.defaults import url
class HospitalResource(ModelResource):
class Meta:
queryset = Hospital.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
class GPSurgeryResource(ModelResource):
class Meta:
queryset = GPSurgery.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
def autocomplete(request):
response = HttpResponse(mimetype='application/json')
name = request.GET.get('name','')
hospitals = []
surgeries = []
if len(name) >= 2:
hospitals = Hospital.objects.filter(name__icontains=name).all()
surgeries = GPSurgery.objects.filter(name__icontains=name).all()
JSONSerializer = serializers.get_serializer("json")
json_serializer = JSONSerializer()
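    # QuerySets cannot be concatenated with "+", so materialize them as lists first.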
    json_serializer.serialize(list(hospitals) + list(surgeries), stream=response)
return response
| null |
nhsbugs/facilities/api.py
|
api.py
|
py
| 1,189 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tastypie.resources.ModelResource",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "facilities.models.Hospital.objects.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "facilities.models.Hospital.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "facilities.models.Hospital",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "tastypie.resources.ModelResource",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "facilities.models.GPSurgery.objects.all",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "facilities.models.GPSurgery.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "facilities.models.GPSurgery",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "facilities.models.Hospital.objects.filter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "facilities.models.Hospital.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "facilities.models.Hospital",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "facilities.models.GPSurgery.objects.filter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "facilities.models.GPSurgery.objects",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "facilities.models.GPSurgery",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.core.serializers.get_serializer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.core.serializers",
"line_number": 36,
"usage_type": "name"
}
] |
451508286
|
import torch.nn as nn
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import BatchSampler
from torchsummary import summary
from torchvision import transforms
from torchvision.datasets import DatasetFolder
import errno
import numpy as np
import os
import pickle
import random
import sys
import torch
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
import math
ratio = 3 # reduction ratio for SE
###############################################################################################################
# server
###############################################################################################################
sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/')
root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB05D/HIPP/3D/AD-NC/'
###############################################################################################################
# HP computer
###############################################################################################################
#sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing')
#root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F100_MS2_MB10D/HIPP/3D/AD-NC/'
ADNI_MODEL_EXTENSIONS = ('.pkl')
# 1 pickle loader (load one sample)
def pickle_loader(path_file):
dir_name = os.path.dirname(path_file)
with open(path_file, 'rb') as f:
model_adni = pickle.load(f)
return model_adni
# to check if the file type is allowed
def has_file_allowed_extension(filename, extensions):
return filename.lower().endswith(extensions)
def is_image_file(filename):
return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS)
# function
def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None):
images = []
dir = os.path.expanduser(dir)
if not ((extensions is None) ^ (is_valid_file is None)):
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
for target in sorted(class_to_idx.keys()):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = (path, class_to_idx[target])
images.append(item)
return images
# 2 Class Datafolder
class Dataset_ADNI_Folder(DatasetFolder):
# Methodes
def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None):
self.root = root
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
"Supported extensions are: " + ",".join(extensions)))
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.transform = transforms.Compose([transforms.ToTensor()])
self.targets = [s[1] for s in samples]
# __getitem__
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
# if self.transform is not None:
# sample = self.transform(sample)
# if self.target_transform is not None:
# target = self.target_transform(target)
# sample is objet instance from HippModel (L, R, V, Label)
return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target)
# __len__
def __len__(self):
return len(self.samples)
# _find_classes
def _find_classes(self, dir):
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
#==============================================================================
# Network definition
#==============================================================================
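# Baseline 2D CNN for AD vs. NC classification: the 28 depth slices of the
# hippocampus patch are fed in as the 28 input channels of the first conv layer.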
class Network_Baseline(nn.Module):
def __init__(self):
super(Network_Baseline, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(28, 32, kernel_size=7, stride=1, padding=0),
nn.BatchNorm2d(32),
nn.ReLU()
)
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=6, stride=1, padding=0),
nn.ReLU()
)
self.pool1 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
)
self.layer3 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=2, stride=1, padding=0),
nn.ReLU()
)
self.fc1 = nn.Linear(128*7*7, 2056)
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(2056, 2)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.pool1(x)
x = self.layer3(x)
# print("size", x.size())
x = x.view(-1, self.num_flat_features(x))
# x = self.dropout(x)
# print("size", x.size())
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
#==========================================================================
# Function: Main definition
#==========================================================================
def main():
    # params for data
id_device = 1
params_num_workers = 4
batch_size = 64
num_classes = 2
save_frequency = 2
learning_rate = 0.00001
num_epochs = 500
weight_decay = 0.0001
momentum = 0.9
train_losses, test_losses = [], []
running_loss = 0
steps = 0
print_every = 35 # 175/5
# select device
device = torch.device("cuda:" + str(id_device) if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0
print("using device :", device)
model = Network_Baseline().to(device)
# weights initialization
# model.apply(weights_init)
# DataFolder
train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None)
valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None)
test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None)
# Dataloader
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
valid_loader = test_loader
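    # NOTE: validation metrics below are computed on the test split.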
# net = LeNet()
# summary(model, (28, 28, 28))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
#scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=10, gamma=0.1)
# Train the model
total_step = len(train_loader)
loss_list = []
acc_list = []
valid_acc = []
running_loss = 0.0
for epoch in range(num_epochs):
for i, (d1, d2, v, labels) in enumerate(train_loader):
#
steps += 1
# # forward + backward + optimize
# print("d1 size:", d1.size())
# d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float)
d1 = d1.to(device, dtype=torch.float)
# print("d1 size:", d1.size())
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
outputs = model(d1)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
# Track the accuracy
total = labels.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
# acc_list.append((correct / total) * 100)
if steps % print_every == 0:
acc_list.append((correct / total) * 100)
test_loss = 0
accuracy = 0
model.eval()
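                # Evaluate on the validation loader without tracking gradients.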
with torch.no_grad():
for i, (v_d1, v_d2, v_v, v_labels) in enumerate(valid_loader):
# v_d1 = torch.unsqueeze(v_d1, 1).to(device, dtype=torch.float)
v_d1 = v_d1.to(device, dtype=torch.float)
v_labels = v_labels.to(device)
v_outputs = model(v_d1)
batch_loss = criterion(v_outputs, v_labels)
test_loss += batch_loss.item()
ps = torch.exp(v_outputs)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == v_labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# train_losses.append(running_loss/len(train_loader))
train_losses.append(running_loss/print_every)
test_losses.append(test_loss/len(valid_loader))
print(f"Epoch {epoch+1}/{num_epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Train accuracy: {(correct / total) * 100:.3f}.. "
f"Test loss: {test_loss/len(valid_loader):.3f}.. "
f"Test accuracy: {(accuracy/len(valid_loader) * 100):.3f}")
valid_acc.append((accuracy/len(valid_loader) * 100))
running_loss = 0
model.train()
# scheduler.step()
plt.plot(acc_list, label='Training accu')
plt.plot(valid_acc, label='Validation accu')
plt.legend(frameon=False)
plt.show()
plt.plot(train_losses, label='Training loss')
plt.plot(test_losses, label='Validation loss')
plt.legend(frameon=False)
plt.show()
print('Finished Training')
#==========================================================================
# Start : __Main__
#==========================================================================
if __name__ == '__main__':
main()
| null |
src/pytorch-template/old/baseline/Network_num_2.py
|
Network_num_2.py
|
py
| 11,582 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.DatasetFolder",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "os.scandir",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "torch.float",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "torch.max",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 333,
"usage_type": "name"
}
] |
320101996
|
import argparse
import numpy as np
import h5py
import data_reader as dr
import util as ut
class ImageSerializer():
def __init__(self, n_bins):
self.mjj_cut = 1100.
self.n_bins = n_bins
def read_file(self, path ):
event_reader = dr.DataReader( path )
events = event_reader.read_jet_constituents()
dijet_features = event_reader.read_jet_features()
labels = event_reader.read_labels()
return [events[:, 0, :, :], events[:, 1, :, :], dijet_features, labels]
def bin_data_to_image( self, events, bin_borders ):
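        # Rasterize each event into an n_bins x n_bins eta-phi image, accumulating
        # particle pt in the corresponding bin.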
eventImagesShape = ( events.shape[0], self.n_bins, self.n_bins )
images = np.zeros(eventImagesShape, dtype="float32")
for eventNo, event in enumerate(events): # for each event (100x3) populate eta-phi binned image with pt values
# bin eta and phi of event event
binIdxEta = np.digitize(event[:, 0], bin_borders, right=True) - 1 # np.digitize starts binning with 1
binIdxPhi = np.digitize(event[:, 1], bin_borders, right=True) - 1
for particle in range(event.shape[0]):
images[eventNo, binIdxEta[particle], binIdxPhi[particle]] += event[particle, 2] # add pt to bin of jet image
return images
def convert_events_to_image( self, events_j1, events_j2 ):
        minAngle = -0.8
        maxAngle = 0.8
bin_borders = np.linspace(minAngle, maxAngle, num=self.n_bins) # bins for eta & phi
return [ self.bin_data_to_image( events_j1, bin_borders ), self.bin_data_to_image( events_j2, bin_borders ) ]
def normalize_by_jet_pt(self, images_j1, images_j2, jet_features, labels):
idx_ptj1 = labels.index('j1Pt')
idx_ptj2 = labels.index('j2Pt')
images_j1 = np.divide(images_j1, jet_features[:, idx_ptj1, None, None])
images_j2 = np.divide(images_j2, jet_features[:, idx_ptj2, None, None])
return [images_j1, images_j2]
def write_transformed(self, images, dijet_features, labels, out_path ):
with h5py.File(out_path,'w') as f:
f.create_dataset('images_j1_j2', data=images, compression='gzip', dtype='float32')
f.create_dataset('eventFeatures', data=dijet_features, compression='gzip', dtype='float32')
f.create_dataset('eventFeatureNames',data=[l.encode('utf-8') for l in labels]) # encode python3 unicode for h5py
print('wrote {0} event image pairs to {1}'.format(dijet_features.shape[0],out_path))
def shuffle_unisono(self, jet1_data, jet2_data, dijet_data):
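        # Apply one shared random permutation so jet1, jet2 and dijet rows stay aligned.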
assert len(jet1_data) == len(jet2_data) == len(dijet_data)
p = np.random.permutation(len(jet1_data))
return [jet1_data[p], jet2_data[p], dijet_data[p]]
def read_events_write_images(self, in_path, out_path, n_evts ):
''' reads: jet-constituents (2 jets x n events x m particles x 3 features ), dijet-features and labels,
writes: jet-images ( 2 jets x n events x n_bins x n_bins x 1 channel ), dijet-features (unmodified), lables (unmodified)
with events cut at mjj_cut and images normalized by pt
'''
# read events
events_j1, events_j2, dijet_features, labels = self.read_file( in_path )
n_evts_read = dijet_features.shape[0]
print('read {} events'.format(n_evts_read))
# reduce to number of events given
if n_evts_read > n_evts:
events_j1, events_j2, dijet_features = self.shuffle_unisono(events_j1, events_j2, dijet_features)
events_j1, events_j2, dijet_features = events_j1[:n_evts], events_j2[:n_evts], dijet_features[:n_evts]
# mass cut
mjj_idx = labels.index('mJJ')
events_j1, events_j2, dijet_features = ut.filter_arrays_on_value( events_j1, events_j2, dijet_features, filter_arr=dijet_features[:,mjj_idx], filter_val=self.mjj_cut )
# convert to images
images_j1, images_j2 = self.convert_events_to_image( events_j1, events_j2 )
# normalize by pt
images_j1, images_j2 = self.normalize_by_jet_pt( images_j1, images_j2, dijet_features, labels )
# write images to file ( dim = 2 (jets) X n_events X n_bins X n_bins
image_data = np.array([images_j1, images_j2], dtype="float32")
self.write_transformed( image_data, dijet_features, labels, out_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='transform event data to jet image')
parser.add_argument('-in', dest='infile', type=str, help='input file name/path')
parser.add_argument('-out', dest='outfile', type=str, default='out.h5', help='output file name/path')
parser.add_argument('-bin', dest='n_bins', type=int, default=32, help='number of bins in jet image')
parser.add_argument('-n', dest='num_evts', type=int, default=1e9, help='number of events for output dataset')
args = parser.parse_args()
print('converting data in file', args.infile)
serializer = ImageSerializer( args.n_bins )
serializer.read_events_write_images( args.infile, args.outfile, args.num_evts )
| null |
sarewt/event_to_image_serialization.py
|
event_to_image_serialization.py
|
py
| 5,080 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "data_reader.DataReader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "util.filter_arrays_on_value",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 95,
"usage_type": "call"
}
] |
434728083
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The two most basic operations: insert and bulk insert.
"""
from __future__ import print_function
from mongoengine import *
from datetime import datetime, date
connect("test", host="localhost", port=27017)
class User(Document):
user_id = IntField(primary_key=True)
name = StringField(max_length=128)
create_time = DateTimeField()
meta = {"collection": "user"}
def __str__(self):
return "User(user_id=%r, name=%r, create_time=%r)" % (
self.user_id, self.name, self.create_time)
if __name__ == "__main__":
User.objects.delete()
User.objects.insert([
User(user_id=1, name="Jack", create_time=datetime.now()),
User(user_id=2, name="Tom", create_time=datetime.now()),
User(user_id=3, name="Paul", create_time=datetime.now()),
])
def find_edit_then_save():
    """In an ORM framework an update is usually done in one of two ways:
    1. Modify the instance directly and call save() at the end.
    2. Call the update() method, which applies the ``$set`` operator by default.
    """
user = User.objects(user_id=1).first()
user.name = "Mike"
user.save()
user = User.objects(user_id=2).first()
user.update(name="Mike")
# find_edit_then_save()
def create_then_save():
    """Even if the instance was newly created rather than queried from the
    database, saving it acts as an update as long as the primary key matches
    an existing document.
    **Note**: this actually deletes the old document and inserts a new one,
    which is why the original ``create_time`` field disappears in this example.
    """
user = User(user_id=1, name="Mike")
user.save()
# create_then_save()
def update_many():
    """When calling update(), multi-update is enabled by default. To update
    a single document only, use update_one() instead.
    """
User.objects(user_id__gte=1).update(name="Mike")
User.objects(user_id__gte=1).update_one(name="Alice")
# update_many()
def upsert_example():
    """Upsert operates on a single instance; by default update() applies
    the ``$set`` operator.
    """
user = User(user_id=1)
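    # With upsert=True the document is inserted when user_id=1 does not
    # exist yet; otherwise the given fields are applied via $set.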
user.update(name="Mike", upsert=True)
# upsert_example()
for user in User.objects:
print(user)
| null |
lsn04_basic_update_and_upsert.py
|
lsn04_basic_update_and_upsert.py
|
py
| 2,512 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
}
] |
171918871
|
"""
Copyright (c) 2018 SPARKL Limited. All Rights Reserved.
Author <[email protected]> Miklos Duma.
Test cases for Crypto Portfolio SPARKL mix in examples repo.
"""
import pytest
from tests.conftest import (IMPORT_DIR, OPERATION, INPUT_FIELDS, EXP_RESPONSE,
CHECK_FUN, run_tests, read_from_config)
# Collect link to test slack channel from environment variable.
SLACK_CHANNEL = read_from_config('slack_channel')
# Messages sent to this Slack channel must come back with error.
WRONG_SLACK_CHANNEL = 'https://hooks.slack.com/services/bla/bla/bla'
# Files to import for tests.
FILE_PATHS = ['Library/lib_slack/lib_slack.xml']
# SPARKL path to tested configuration.
USER_TREE_PATH = '{}/lib.slack'.format(IMPORT_DIR)
# Test input.
TEST_MAP = '{\"field1\": \"foo\", \"field2\": \"bar\"}'
TEST_MESSAGE_TEXT = '{\"text\": \"Test with simple text is successful.\"}'
# Keys expected by SLACK.
EXPECTED_MESSAGE_KEYS = ['color', 'fallback', 'fields', 'pretext']
# Path to tested operations.
SOLICIT_OP = '{}/Mix/Test/Start'.format(USER_TREE_PATH)
BUILD_OP = '{}/Mix/Test/BuildMessage'.format(USER_TREE_PATH)
SEND_OP = '{}/Mix/Test/SendToSlack'.format(USER_TREE_PATH)
# Input and output fields of the configuration.
TEST_COLOUR_FLD = 'test_colour'
TEST_HEADING_FLD = 'test_heading'
TEST_MAP_FLD = 'test_map'
TEST_URL_FLD = 'test_url'
TEST_MSG_FLD = 'test_message'
# SPARKL replies/responses.
OK_RESP = 'Ok'
ERROR_RESP = 'Error'
#####################################################
# Additional check functions used by the test data. #
#####################################################
def check_msg_keys(output_fields):
"""
Checks whether the constructed message dict
has all the keys expected by SLACK.
"""
message = output_fields[TEST_MSG_FLD]
message_keys = list(message.keys())
message_keys.sort()
message_keys_error = 'Expected keys are {}, ' \
'not {}.'.format(EXPECTED_MESSAGE_KEYS, message_keys)
assert message_keys == EXPECTED_MESSAGE_KEYS, message_keys_error
##########################################################################
# Test data.
#
# Each set of data is used to call the parametrised test once.
# A set comprises:
# - OPERATION:
# The name of the operation to call
# - EXP_RESPONSE:
# The expected response/reply
# - INPUT_FIELDS (optional):
# The input fields and their values, if any
# - OUTPUT_FIELDS (optional):
# One or more output fields with their expected value
# - CHECK_FUN (optional):
# A function that makes extra assertions on the output values
# - STOP_OR_NOT (optional):
# A flag to indicate all running services must be stopped
# before the test is run
##########################################################################
TEST_DATA = [
# Test BuildMessage operation.
# Expects built message to contain all the fields Slack requires.
{
OPERATION: BUILD_OP,
INPUT_FIELDS: [
(TEST_COLOUR_FLD, 'green'),
(TEST_HEADING_FLD, 'BuildMessage test'),
(TEST_MAP_FLD, TEST_MAP)],
EXP_RESPONSE: OK_RESP,
CHECK_FUN: check_msg_keys},
# Test SendToSlack operation. Expects Ok reply.
{
OPERATION: SEND_OP,
INPUT_FIELDS: [(TEST_MSG_FLD, TEST_MESSAGE_TEXT),
(TEST_URL_FLD, SLACK_CHANNEL)],
EXP_RESPONSE: OK_RESP},
# Test SendToSlack operation with wrong URL. Expects Error reply.
{
OPERATION: SEND_OP,
INPUT_FIELDS: [(TEST_MSG_FLD, TEST_MESSAGE_TEXT),
(TEST_URL_FLD, WRONG_SLACK_CHANNEL)],
EXP_RESPONSE: ERROR_RESP},
# Test Start solicit operation - i.e. full transaction.
# Expects a message with all the fields Slack requires.
{
OPERATION: SOLICIT_OP,
INPUT_FIELDS: [(TEST_COLOUR_FLD, 'red'),
(TEST_HEADING_FLD, 'Full solicit test with fields'),
(TEST_MAP_FLD, TEST_MAP),
(TEST_URL_FLD, SLACK_CHANNEL)],
EXP_RESPONSE: OK_RESP,
CHECK_FUN: check_msg_keys}
]
@pytest.mark.parametrize('test_data', TEST_DATA)
def test_lib_slack(test_data, setup_method):
"""
Calls each set of data in TEST_DATA. The function also uses:
- setup_method:
A basic setup method that imports the needed configuration(s)
and yields the SPARKL alias used in the session
"""
alias = setup_method
run_tests(alias, **test_data)
| null |
tests/with_auth/test_lib_slack.py
|
test_lib_slack.py
|
py
| 4,554 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tests.conftest.read_from_config",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tests.conftest.IMPORT_DIR",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "tests.conftest.OPERATION",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "tests.conftest.INPUT_FIELDS",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "tests.conftest.EXP_RESPONSE",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "tests.conftest.CHECK_FUN",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "tests.conftest.OPERATION",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "tests.conftest.INPUT_FIELDS",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "tests.conftest.EXP_RESPONSE",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "tests.conftest.OPERATION",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "tests.conftest.INPUT_FIELDS",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "tests.conftest.EXP_RESPONSE",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "tests.conftest.OPERATION",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "tests.conftest.INPUT_FIELDS",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "tests.conftest.EXP_RESPONSE",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "tests.conftest.CHECK_FUN",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "tests.conftest.run_tests",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 123,
"usage_type": "attribute"
}
] |
74662768
|
"""
Given a stack of N elements,
interleave the first half of the stack with the second half reversed using only one other queue.
This should be done in-place.
Recall that you can only push or pop from a stack, and enqueue or dequeue from a queue.
For example, if the stack is [1, 2, 3, 4, 5],
it should become [1, 5, 2, 4, 3].
If the stack is [1, 2, 3, 4], it should become [1, 4, 2, 3].
Solution:
for i from n-1 down to 0, move the top i+1 items from the stack to the queue,
then move them straight back onto the stack.
Each pass reverses the moved block (the stack pops reverse it once, the queue
preserves order), and the shrinking passes progressively interleave the halves.
"""
import collections
def interleave_stack_queue(stack):
queue = collections.deque()
n = len(stack) - 1
for i in range(n - 1, -1, -1):
for _ in range(i + 1):
queue.append(stack.pop())
for _ in range(i + 1):
stack.append(queue.popleft())
return stack
if __name__ == "__main__":
s1 = [1, 2, 3, 4, 5]
e1 = [1, 5, 2, 4, 3]
s2 = [1, 2, 3, 4]
e2 = [1, 4, 2, 3]
assert interleave_stack_queue(s1) == e1
assert interleave_stack_queue(s2) == e2
| null |
old/dcp_series/dcp_180.py
|
dcp_180.py
|
py
| 1,068 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.deque",
"line_number": 21,
"usage_type": "call"
}
] |
262499944
|
import sys
sys.path.insert(0, '/data/niekserver/lib')
from http import _POST
from http import _USER
from http import html
from http import java
from general import redirect
from general import make_page
from urllib.parse import unquote
name = "Server Connect"
content_head = """
<h1>Server Connect</h1>
"""
form = " \
<form method='POST' style='display: inline;'> \
<pre style='display: inline;'>Domain/IP:\t</pre> \
<input type='text' name='url' value=''> \
<pre style='display: inline;'>Port:</pre> \
<input type='text' name='port' value=''> \
<input type=submit value='Connect'> \
</form> \
"
result = " \
<pre>Domain:\t\t{}</pre> \
<pre>IP:\t\t\t{}</pre> \
<pre>Port:\t\t\t{}</pre> \
\
<ul class='terminal'> \
<li><form> \
    <input class='terminal' type='text' name='command' value=''> \
</form></li> \
<li>Test</li> \
</ul> \
"
def connect(url, port):
import socket
import re
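    # Strip an optional http:// prefix, match the host against a
    # letters/digits/dot/hyphen pattern, then resolve it via DNS.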
if url[:7] == "http://":
url = url[7:]
pattern = re.compile(r'[a-zA-Z0-9.-]+')
if pattern.match(url):
ip = str(socket.gethostbyname(url))
html(make_page(name, content_head + result.format(url, ip, port)))
else:
html("Invalid Domain/IP")
if "username" not in _USER:
html(redirect("Login", '/py/login.py'))
elif "url" not in _POST or "port" not in _POST:
html(make_page(name, content_head + form))
else:
connect(unquote(_POST["url"]), _POST["port"])
| null |
NiekServer/py/server_connect.py
|
server_connect.py
|
py
| 1,425 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "socket.gethostbyname",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "http.html",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "general.make_page",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "http.html",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "http._USER",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "http.html",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "general.redirect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "http._POST",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "http.html",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "general.make_page",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "urllib.parse.unquote",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "http._POST",
"line_number": 60,
"usage_type": "name"
}
] |
639196969
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from avatar.configuration.configurationFactory import ConfigurationFactory
from avatar.system import System
import os
#Analyzer Target Emulator tests
ate_tests = {
"analyzer" :
{
"supported" : {"s2e"},
"unsupported" : {"klee", "angr"},
"unknown" : {"abc"},
},
"target" :
{
"supported" : {"openocd", "superspeed-jtag", "gdb"},
"unsupported" : {},
"unknown" : {"abc"},
},
"emulator" :
{
"supported" : {"qemu"},
"unsupported" : {},
"unknown" : {"abc"},
}
}
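# For each component the three buckets name the implementations a test
# expects to work, to raise NotImplementedError, or to raise ValueError.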
def generate_conf(analyzer, target, emulator, type):
analyzer_configuration = {}
emulator_configuration = {}
target_configuration = {}
configuration = {
"version" : 1.0,
"output_directory" : "",
"configuration_directory" : os.getcwd(),
"analyzer" : {"name" : analyzer, "configuration": analyzer_configuration },
"emulator" : {"name" : emulator, "configuration": emulator_configuration },
"target" : {"name" : target, "configuration": target_configuration },
}
return configuration
def test():
#Test supported, unsupported and unknown configuration
#Supported should start the element as defined
#unsupported should raise a NotImplementedError
#unknown should raise a ValueError
print("[*] Testing The Configuration module")
tested_types = {"supported", "unsupported", "unknown"}
for t in tested_types :
for analyzer in ate_tests["analyzer"][t] :
for target in ate_tests["target"][t] :
for emulator in ate_tests["emulator"][t] :
print(" [-] " + analyzer + " " + target + " " + emulator)
                    try:
                        conf = generate_conf(analyzer, target, emulator, t)
                        configuration = ConfigurationFactory.createParser(conf)
                        # target = TargetsFactory.create(self._configuration)
                        # emulator = EmulatorsFactory.create(self._configuration)
                        avatar = System(conf, ["--debug", "--trace"])
                        avatar.start()
                        avatar.stop()
                    except (ValueError, NotImplementedError) as e:
                        # "unknown" names must raise ValueError, unsupported
                        # ones NotImplementedError; anything else is a failure.
                        if type(e).__name__ == "ValueError" and t == "unknown":
                            print("        Success")
                        elif type(e).__name__ == "NotImplementedError" and t == "unsupported":
                            print("        Success")
                        else:
                            print("Test failed : " + type(e).__name__)
                            print("Test vector : " + str(conf))
| null |
avatar/tests/test_configuration.py
|
test_configuration.py
|
py
| 3,055 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "future.standard_library.install_aliases",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "avatar.configuration.configurationFactory.ConfigurationFactory.createParser",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "avatar.configuration.configurationFactory.ConfigurationFactory",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "avatar.configuration.configurationFactory.start",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "avatar.configuration.configurationFactory",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "avatar.configuration.configurationFactory.stop",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "avatar.configuration.configurationFactory",
"line_number": 90,
"usage_type": "name"
}
] |
464706012
|
import tkinter as tk
from tkinter import *
import sklearn.neighbors._typedefs
import numpy as np
import pandas as pd
import pickle
df = pd.read_csv("career_pred.csv")
df = df.iloc[: , 7:]
dataset = df
for i in dataset.columns:
Types = []
for j in dataset[i]:
#print(j)
if not(j in Types):
Types.append(j)
# Business Technical Manager Design
Business = ['Business Systems Analyst',
'Business Intelligence Analyst',
'Information Security Analyst',
'CRM Business Analyst',
'E-Commerce Analyst',
'Systems Analyst',
'Information Technology Auditor',
'Quality Assurance Associate'
]
Technical = ['Software Systems Engineer',
'Network Engineer',
'Software Engineer',
'Technical Engineer',
'Network Security Engineer',
'Database Developer',
'CRM Technical Developer',
'Mobile Applications Developer',
'Applications Developer',
'Web Developer',
'Software Developer',
'Technical Services/Help Desk/Tech Support',
'Technical Support',
'Software Quality Assurance (QA) / Testing',
'Systems Security Administrator',
'Portal Administrator',
'Network Security Administrator',
'Database Administrator',
'Solutions Architect',
'Data Architect',
'Programmer Analyst'
]
Manager = [
'Project Manager',
'Information Technology Manager',
'Database Manager'
]
Design = [
'UX Designer',
'Design & UX'
]
soft = [2, 1]
hard = [3, 0]
# Data
data = df.iloc[:,:-1].values
label = df.iloc[:,-1]
#Label Encoding: COnverting To Numeric values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
for i in range(6,31):
data[:,i] = labelencoder.fit_transform(data[:,i])
#Normalizing the data
from sklearn.preprocessing import Normalizer
data1=data[:,:6]
normalized_data = Normalizer().fit_transform(data1)
data2=data[:,6:]
df1 = np.append(normalized_data,data2,axis=1)
#Combining into a dataset
df2=df.iloc[:,:-1]
dataset = pd.DataFrame(df1,columns=df2.columns)
#dataset
X=dataset.copy()
Y = df["Suggested Job Role"]
# Business Technical Manager Design
for i in range(len(Y)):
if Y[i] in Business:
Y[i] = 'Business'
elif Y[i] in Technical:
Y[i] = 'Technical'
elif Y[i] in Design:
Y[i] = 'Design'
elif Y[i] in Manager:
Y[i] = 'Manager'
# For label
label = df.iloc[:,-1]
original=label.unique()
label=label.values
label2 = labelencoder.fit_transform(label)
y=pd.DataFrame(label2,columns=["Suggested Job Role"])
numeric=y["Suggested Job Role"].unique()
Y = pd.DataFrame({'Suggested Job Role':original, 'Associated Number':numeric})
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
j_l = []
for i in Y:
if not (i in j_l):
j_l.append(i)
Y_train = []
#print(y_train)
for i in y_train['Suggested Job Role']:
if i in soft:
Y_train.append(0)
elif i in hard:
Y_train.append(1)
Y_test = []
for i in y_test['Suggested Job Role']:
if i in soft:
Y_test.append(0)
elif i in hard:
Y_test.append(1)
#from sklearn.ensemble import RandomForestClassifier
#rf_model=RandomForestClassifier(n_estimators=1000,max_features=2,oob_score=True)
#rf_model.fit(X_train,y_train)
#from sklearn.neighbors import KNeighborsClassifier
#knn = KNeighborsClassifier(n_neighbors=9)
#knn.fit(X_train,y_train)
filename = "KNN_Model.sav"
knn = pickle.load(open(filename,'rb'))
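# Load the pre-trained KNN classifier; KNN_Model.sav is expected to sit
# in the working directory alongside this script.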
#pickle.dump(knn,open(filename,'wb'))
#print("Acc: ",knn.score(X_test,y_test))
window = tk.Tk()
window.title('Questionare P1')
window.geometry("400x550")
'''
Button
def run_model():
print("Model")
button = tk.Button(window, text='Stop', width=25, command = run_model)
button.pack()
window.mainloop()
'''
'''
Put text - LAbel
w = Label(window, text='GeeksForGeeks.org!')
w.pack()
'''
'''
List
top = Tk()
Lb = Listbox(top)
Lb.insert(1, 'Python')
Lb.insert(2, 'Java')
Lb.insert(3, 'C++')
Lb.insert(4, 'Any other')
Lb.pack()
'''
'''
Check Box
master = Tk()
var1 = IntVar()
Checkbutton(master, text='male', variable=var1).grid(row=0, sticky=W)
var2 = IntVar()
Checkbutton(master, text='female', variable=var2).grid(row=1, sticky=W)
mainloop()
'''
'''
Text box
master = Tk()
Label(master, text='First Name').grid(row=0)
Label(master, text='Last Name').grid(row=1)
e1 = Entry(master)
e2 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
mainloop()
'''
'''
List box
top = Tk()
Lb = Listbox(top)
Lb.insert(1, 'Python')
Lb.insert(2, 'Java')
Lb.insert(3, 'C++')
Lb.insert(4, 'Any other')
Lb.pack()
top.mainloop()
'''
'''
check list - Radio button
from tkinter import *
root = Tk()
v = IntVar()
Radiobutton(root, text='GfG', variable=v, value=1).pack(anchor=W)
Radiobutton(root, text='MIT', variable=v, value=2).pack(anchor=W)
mainloop()
'''
'''
scale
from tkinter import *
master = Tk()
w = Scale(master, from_=0, to=42)
w.pack()
w = Scale(master, from_=0, to=200, orient=HORIZONTAL)
w.pack()
mainloop()
'''
q1 = IntVar()
#Q 1
l1 = Label(window, text='Уровень знания математики в % (1-100)')
l1.pack()
q1_w = Scale(window, from_=1, to=100,orient=HORIZONTAL, variable = q1)
q1_w.pack()
q2 = IntVar()
#Q 2
l2 = Label(window, text='Навыки коммуникации в % (1-100)' )
l2.pack()
q2_w = Scale(window, from_=1, to=100,orient=HORIZONTAL, variable = q2)
q2_w.pack()
q3 = IntVar()
#Q 3
l3 = Label(window, text='Сколько часов в день вы работаете? (1-12)' )
l3.pack()
q3_w = Scale(window, from_=1, to=12,orient=HORIZONTAL, variable = q3)
q3_w.pack()
q4 = IntVar()
#Q 4
l4 = Label(window, text='Оцените ваш уровень логического мышления (1-8)', )
l4.pack()
q4_w = Scale(window, from_=1, to=8,orient=HORIZONTAL, variable = q4)
q4_w.pack()
q5 = IntVar()
# Q 5
l5 = Label(window, text='Сколько раз вы участвовали в Хакатонах? (0-6)', )
l5.pack()
q5_w = Scale(window, from_=0, to=6,orient=HORIZONTAL, variable = q5)
q5_w.pack()
q6 = IntVar()
# Q 6
l6 = Label(window, text='Оцените свой уровень кодинга (1-9)', )
l6.pack()
q6_w = Scale(window, from_=1, to=9,orient=HORIZONTAL, variable = q6)
q6_w.pack()
q7 = IntVar()
# Q 7
l7 = Label(window, text='Оцените ваш Навык публичных выступлений (1-9)', )
l7.pack()
q7_w = Scale(window, from_=1, to=9,orient=HORIZONTAL, variable = q7)
q7_w.pack()
def show1():
print(q1.get()," ",q2.get()," ",q3.get()," ",q4.get()," ",q5.get()," ",q6.get()," ",q7.get())
#b_show1 = tk.Button(window, text='Show', width=25, command = show1)
#b_show1.pack()
b_next1 = tk.Button(window, text='Next', fg = "green",width=25, command = window.destroy)
b_next1.pack()
#the end of first page
mainloop()
show1()
#New Window
window = tk.Tk()
window.title('Questionare P2')
window.geometry("400x550")
# Q 8
q8 = IntVar()
l8 = Label(window, text='Работали ли вы над проектом до этого времени?', )
l8.pack()
Radiobutton(window, text='Yes', variable=q8, value=1).pack(anchor=W)
Radiobutton(window, text='No', variable=q8, value=0).pack(anchor=W)
# Q 9
q9 = IntVar()
l9 = Label(window, text='Способны ли вы к самообучению?', )
l9.pack()
Radiobutton(window, text='Yes', variable=q9, value=1).pack(anchor=W)
Radiobutton(window, text='No', variable=q9, value=0).pack(anchor=W)
# Q 10
q10 = IntVar()
l10 = Label(window, text='Ходите ли вы на дополнительные занятия?', )
l10.pack()
Radiobutton(window, text='Yes', variable=q10, value=1).pack(anchor=W)
Radiobutton(window, text='No', variable=q10, value=0).pack(anchor=W)
# Q 11
q11 = IntVar()
l11 = Label(window, text='Какие у Вас сертификаты есть?', )
l11.pack()
Radiobutton(window, text='Программирование оболочки', variable=q11, value=8).pack(anchor=W)
Radiobutton(window, text='Машинное обучение', variable=q11, value=5).pack(anchor=W)
Radiobutton(window, text='Разработка приложений', variable=q11, value=0).pack(anchor=W)
Radiobutton(window, text='Python', variable=q11, value=6).pack(anchor=W)
Radiobutton(window, text='R программирование', variable=q11, value=7).pack(anchor=W)
Radiobutton(window, text='hadoop', variable=q11, value=4).pack(anchor=W)
Radiobutton(window, text='Информационная безопасность', variable=q11, value=3).pack(anchor=W)
Radiobutton(window, text='Создание дистрибутива', variable=q11, value=1).pack(anchor=W)
Radiobutton(window, text='Фул стек', variable=q11, value=2).pack(anchor=W)
def show2():
print(q8.get()," ",q9.get()," ",q10.get()," ",q11.get())
#b_show2 = tk.Button(window, text='Show', width=25, command = show2)
#b_show2.pack()
b_next2 = tk.Button(window, text='Next', fg = "green",width=25, command = window.destroy)
b_next2.pack()
mainloop()
show2()
#New page 3
window = tk.Tk()
window.title('Questionare P3')
window.geometry("400x550")
# Q 12
q12 = IntVar()
l12 = Label(window, text='Какие семинары вы проходили?', )
l12.pack()
Radiobutton(window, text='Облачные вычисления', variable=q12, value=0).pack(anchor=W)
Radiobutton(window, text='Безопасность баз данных', variable=q12, value=2).pack(anchor=W)
Radiobutton(window, text='Веб-технологии', variable=q12, value=7).pack(anchor=W)
Radiobutton(window, text='Наука о данных', variable=q12, value=1).pack(anchor=W)
Radiobutton(window, text='Тестирование', variable=q12, value=6).pack(anchor=W)
Radiobutton(window, text='Взлом', variable=q12, value=4).pack(anchor=W)
Radiobutton(window, text='Разработка игр', variable=q12, value=3).pack(anchor=W)
Radiobutton(window, text='Системное проектирование', variable=q12, value=5).pack(anchor=W)
# Q 13
q13 = IntVar()
l13 = Label(window, text='Взяли ли вы тест на выявления своего таланта?', )
l13.pack()
Radiobutton(window, text='Да', variable=q13, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q13, value=0).pack(anchor=W)
# Q 14
q14 = IntVar()
l14 = Label(window, text='Участвовали вы на олимпиадах?', )
l14.pack()
Radiobutton(window, text='Да', variable=q14, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q14, value=0).pack(anchor=W)
# Q 15
q15 = IntVar()
l15 = Label(window, text='Какой у вас уровень навыков чтения и письма?', )
l15.pack()
Radiobutton(window, text='Отлично', variable=q15, value=0).pack(anchor=W)
Radiobutton(window, text='Среднее', variable=q15, value=1).pack(anchor=W)
Radiobutton(window, text='Плохо', variable=q15, value=2).pack(anchor=W)
def show3():
print(q12.get()," ",q13.get()," ",q14.get()," ",q15.get())
#b_show3 = tk.Button(window, text='Show', width=25, command = show2)
#b_show3.pack()
b_next3 = tk.Button(window, text='Next', fg = "green",width=25, command = window.destroy)
b_next3.pack()
mainloop()
show3()
# new page 4
window = tk.Tk()
window.title('Questionare P4')
window.geometry("400x650")
# Q 16
q16 = IntVar()
l16 = Label(window, text='Какая у вас память?' )
l16.pack()
Radiobutton(window, text='Отличная', variable=q16, value=0).pack(anchor=W)
Radiobutton(window, text='Нормальная', variable=q16, value=1).pack(anchor=W)
Radiobutton(window, text='Плохая', variable=q16, value=2).pack(anchor=W)
# Q 17
q17 = IntVar()
l17 = Label(window, text='Интересующиеся вас предметы?' )
l17.pack()
Radiobutton(window, text='Облачные вычисления', variable=q17, value=4).pack(anchor=W)
Radiobutton(window, text='Сети', variable=q17, value=7).pack(anchor=W)
Radiobutton(window, text='Взлом', variable=q17, value=6).pack(anchor=W)
Radiobutton(window, text='Компьютерная архитектура', variable=q17, value=0).pack(anchor=W)
Radiobutton(window, text='Программирование', variable=q17, value=9).pack(anchor=W)
Radiobutton(window, text='Параллельные вычисления', variable=q17, value=8).pack(anchor=W)
Radiobutton(window, text='Интернет вещей', variable=q17, value=1).pack(anchor=W)
Radiobutton(window, text='Инженерия данных', variable=q17, value=5).pack(anchor=W)
Radiobutton(window, text='Программная инженерия', variable=q17, value=3).pack(anchor=W)
Radiobutton(window, text='Менеджмент', variable=q17, value=2).pack(anchor=W)
# Q 18
q18 = IntVar()
l18 = Label(window, text='Заинтересованная область карьеры? ' )
l18.pack()
Radiobutton(window, text='Разработчик системы', variable=q18, value=4).pack(anchor=W)
Radiobutton(window, text='Аналитик бизнес-процессов', variable=q18, value=0).pack(anchor=W)
Radiobutton(window, text='Разработчик', variable=q18, value=2).pack(anchor=W)
Radiobutton(window, text='Тестирование', variable=q18, value=5).pack(anchor=W)
Radiobutton(window, text='Безопасность', variable=q18, value=3).pack(anchor=W)
Radiobutton(window, text='Облачные вычисления', variable=q18, value=1).pack(anchor=W)
def show4():
print(q16.get()," ",q17.get()," ",q18.get())
#b_show4 = tk.Button(window, text='Show', width=25, command = show4)
#b_show4.pack()
b_next4 = tk.Button(window, text='Next', fg = "green",width=25, command = window.destroy)
b_next4.pack()
mainloop()
show4()
# new page 5
window = tk.Tk()
window.title('Questionare P5')
window.geometry("400x550")
# Q 19
q19= IntVar()
l19 = Label(window, text='Что для вас важнее иметь высшее образование или опыт работы?', )
l19.pack()
Radiobutton(window, text='Высшее образование', variable=q19, value=0).pack(anchor=W)
Radiobutton(window, text='Работа', variable=q19, value=1).pack(anchor=W)
# Q 20
q20= IntVar()
l20 = Label(window, text='Тип кампании в которой вы бы хотели работать?', )
l20.pack()
Radiobutton(window, text='Веб-услуги', variable=q20, value=8).pack(anchor=W)
Radiobutton(window, text='Услуги SaaS', variable=q20, value=4).pack(anchor=W)
Radiobutton(window, text='Продажи и маркетинг', variable=q20, value=5).pack(anchor=W)
Radiobutton(window, text='Услуги по тестированию и обслуживанию', variable=q20, value=7).pack(anchor=W)
Radiobutton(window, text='Разработка продукта', variable=q20, value=9).pack(anchor=W)
Radiobutton(window, text='BPA', variable=q20, value=0).pack(anchor=W)
Radiobutton(window, text='Сервис', variable=q20, value=6).pack(anchor=W)
Radiobutton(window, text='Продукт', variable=q20, value=3).pack(anchor=W)
Radiobutton(window, text='Облачные услуги', variable=q20, value=1).pack(anchor=W)
Radiobutton(window, text='Финансы', variable=q20, value=2).pack(anchor=W)
# Q 21
q21= IntVar()
l21 = Label(window, text='На вашу профессию влияют взгляды старших людей?', )
l21.pack()
Radiobutton(window, text='Да', variable=q21, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q21, value=0).pack(anchor=W)
# Q 22
q22= IntVar()
l22 = Label(window, text='Увлекаетесь видеоиграми?', )
l22.pack()
Radiobutton(window, text='Да', variable=q22, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q22, value=0).pack(anchor=W)
#
def show5():
print(q19.get()," ",q20.get()," ",q21.get()," ",q22.get())
#b_show5 = tk.Button(window, text='Show', width=25, command = show5)
#b_show5.pack()
b_next5= tk.Button(window, text='Next', fg = "green",width=25, command = window.destroy)
b_next5.pack()
mainloop()
show5()
# new page 6
window = tk.Tk()
window.title('Questionare P6')
window.geometry("400x900")
# Q 23
q23= IntVar()
l23 = Label(window, text='Интересующийся жанры книг?', )
l23.pack()
Radiobutton(window, text='Молитвенные книги', variable=q23, value=21).pack(anchor=W)
Radiobutton(window, text='Детские', variable=q23, value=5).pack(anchor=W)
Radiobutton(window, text='О путешествиях', variable=q23, value=29).pack(anchor=W)
Radiobutton(window, text='Романтические', variable=q23, value=23).pack(anchor=W)
Radiobutton(window, text='Кулинарные книги', variable=q23, value=7).pack(anchor=W)
Radiobutton(window, text='Саморазвитие', variable=q23, value=27).pack(anchor=W)
Radiobutton(window, text='Драма', variable=q23, value=10).pack(anchor=W)
Radiobutton(window, text='Математика', variable=q23, value=18).pack(anchor=W)
Radiobutton(window, text='Религия-духовность', variable=q23, value=22).pack(anchor=W)
Radiobutton(window, text='Антология', variable=q23, value=1).pack(anchor=W)
Radiobutton(window, text='Трилогия', variable=q23, value=30).pack(anchor=W)
Radiobutton(window, text='Автобиографии', variable=q23, value=3).pack(anchor=W)
Radiobutton(window, text='Мистерия', variable=q23, value=19).pack(anchor=W)
Radiobutton(window, text='Дневники', variable=q23, value=8).pack(anchor=W)
Radiobutton(window, text='Журналы', variable=q23, value=17).pack(anchor=W)
Radiobutton(window, text='История', variable=q23, value=15).pack(anchor=W)
Radiobutton(window, text='Искусство', variable=q23, value=2).pack(anchor=W)
Radiobutton(window, text='Словари', variable=q23, value=9).pack(anchor=W)
Radiobutton(window, text='Ужас', variable=q23, value=16).pack(anchor=W)
Radiobutton(window, text='Энциклопедии', variable=q23, value=11).pack(anchor=W)
Radiobutton(window, text='Экшн и приключения', variable=q23, value=0).pack(anchor=W)
Radiobutton(window, text='Фэнтези', variable=q23, value=12).pack(anchor=W)
Radiobutton(window, text='Комиксы', variable=q23, value=6).pack(anchor=W)
Radiobutton(window, text='Научная фантастика', variable=q23, value=26).pack(anchor=W)
Radiobutton(window, text='Серия', variable=q23, value=28).pack(anchor=W)
Radiobutton(window, text='Руководство', variable=q23, value=13).pack(anchor=W)
Radiobutton(window, text='Биографии', variable=q23, value=4).pack(anchor=W)
Radiobutton(window, text='Здоровье', variable=q23, value=14).pack(anchor=W)
Radiobutton(window, text='Сатира', variable=q23, value=24).pack(anchor=W)
Radiobutton(window, text='Наука', variable=q23, value=25).pack(anchor=W)
Radiobutton(window, text='Поэзия', variable=q23, value=20).pack(anchor=W)
b_next6= tk.Button(window, text='Next', fg = "green",width=25, command = window.destroy)
b_next6.pack()
mainloop()
print(q23.get())
# new page 7
window = tk.Tk()
window.title('Questionare P7')
window.geometry("400x650")
# Q 24
q24= IntVar()
l24 = Label(window, text='Ожидаемая цель, зарплата или работа?', )
l24.pack()
Radiobutton(window, text='Зарплата', variable=q24, value=1).pack(anchor=W)
Radiobutton(window, text='Работа', variable=q24, value=0).pack(anchor=W)
# Q 25
q25= IntVar()
l25 = Label(window, text='Вы в отношениях?', )
l25.pack()
Radiobutton(window, text='Да', variable=q25, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q25, value=0).pack(anchor=W)
# Q 26
q26= IntVar()
l26 = Label(window, text='Какое у вас поведения?', )
l26.pack()
Radiobutton(window, text='Упрямый/упертый', variable=q26, value=1).pack(anchor=W)
Radiobutton(window, text='Нежный/мягкий', variable=q26, value=0).pack(anchor=W)
# Q 27
q27= IntVar()
l27 = Label(window, text='Вы хотите работать в управляющим отделе или в техническом?', )
l27.pack()
Radiobutton(window, text='Управляющий', variable=q27, value=0).pack(anchor=W)
Radiobutton(window, text='Технический', variable=q27, value=1).pack(anchor=W)
# Q 28
q28= IntVar()
l28 = Label(window, text='Вас мотивирует работа или зарплата?', )
l28.pack()
Radiobutton(window, text='Зарплата', variable=q28, value=0).pack(anchor=W)
Radiobutton(window, text='Работа', variable=q28, value=1).pack(anchor=W)
# Q 29
q29= IntVar()
l29 = Label(window, text='Вы работаете усердно или с умом?', )
l29.pack()
Radiobutton(window, text='Усердно', variable=q29, value=0).pack(anchor=W)
Radiobutton(window, text='С умом', variable=q29, value=1).pack(anchor=W)
# Q 30
q30= IntVar()
l30 = Label(window, text='Вы когда нибудь работали в команде?', )
l30.pack()
Radiobutton(window, text='Да', variable=q30, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q30, value=0).pack(anchor=W)
# Q 31
q31= IntVar()
l31 = Label(window, text='Вы интроверт?', )
l31.pack()
Radiobutton(window, text='Да', variable=q31, value=1).pack(anchor=W)
Radiobutton(window, text='Нет', variable=q31, value=0).pack(anchor=W)
b_next7= tk.Button(window, text='Get Results', fg = "green",width=25, command = window.destroy)
b_next7.pack()
mainloop()
New_human = [q1.get(),
q2.get(),
q3.get(),q4.get(),q5.get(),q6.get(),q7.get(),
q8.get(),q9.get(),q10.get(),q11.get(),q12.get(),q13.get(),q14.get(),
q15.get(),q16.get(),q17.get(),q18.get(),q19.get(),q20.get(),q21.get(),
q22.get(),q23.get(),q24.get(),q25.get(),q26.get(),q27.get(),q28.get(),
q29.get(),q30.get(),q31.get()]
print("Human: ",New_human)
df = pd.read_csv("career_pred.csv")
df = df.iloc[: , 7:]
data = df.iloc[:,:-1].values
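# Re-apply the same per-sample normalisation used on the training data to
# the six numeric questionnaire answers before prediction.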
NN = Normalizer().fit(data[:,:6])
new_6 =NN.transform([[q1.get(),q2.get(),q3.get(),q4.get(),q5.get(),q6.get()]])
New_human[0] = new_6[0][0]
New_human[1] = new_6[0][1]
New_human[2] = new_6[0][2]
New_human[3] = new_6[0][3]
New_human[4] = new_6[0][4]
New_human[5] = new_6[0][5]
print("New_human: ",New_human)
pr = knn.predict([New_human])
text1 = ''
if pr==1:
text1 ="Design"
if pr==2:
text1 ="Manager"
if pr==0:
text1 ="Business"
if pr==3:
text1 ="Technical"
# new page 7
window = tk.Tk()
window.title('Answer')
window.geometry("150x100")
An = Label(window, text=text1, )
An.pack()
| null |
Prof_Orientation_model.py
|
Prof_Orientation_model.py
|
py
| 23,948 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Normalizer",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Normalizer",
"line_number": 731,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 754,
"usage_type": "call"
}
] |
275804822
|
import base64
import hashlib
import hmac
import json
import re
import time
from datetime import datetime
import requests
def create_signature(secret_key, method, md5, ctype, date, uri):
# Get the string to sign
string_sign = string_to_sign(method, md5, ctype, date, uri)
# Compute the authorization header
hmac_sha1 = hmac.new(secret_key.encode(), string_sign.encode(), hashlib.sha1).digest()
computed_sig = base64.b64encode(hmac_sha1)
return computed_sig
def string_to_sign(method, md5, ctype, date, uri):
"Returns the string to sign"
parts = []
# Add the components
parts.append(method.upper())
parts.append(str(md5))
parts.append(str(ctype))
if date:
parts.append(str(date))
parts.append(str(uri))
return str("\n".join(parts))
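# The canonical string produced above looks like (illustrative values only):
#   POST\n<base64 md5>\napplication/json\n<date header>\n<request uri>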
ISO_DATE_RE = re.compile(
"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})\.?(?P<microsec>\d{6})?")
def isodate_to_datetime(s, match=None):
"Converts an ISO 8601 string to a datetime object"
if match is None:
match = ISO_DATE_RE.match(s)
if match:
year, month, day, hour, minute, second, sub = map(
lambda x: int(x) if x else 0, match.groups())
return datetime(year, month, day, hour, minute, second, sub)
return None
def trace_request(data, url):
d = json.loads(data)
date = isodate_to_datetime(d["date"])
datestring = date.strftime("%Y-%m-%d %H:%M:%S.%f")
print(datestring)
md5 = base64.b64encode(hashlib.md5(data.encode()).digest())
endpoint = "http://api.kiip.me/2.0/{}/?r={}"
    curlurl = endpoint.format(url, time.time())
jaegertoken = "trace-{}".format("f6f07e39617364e0")
signature = create_signature("3b46e5f42299f1697193bb843ed8dbf4", "Post", md5, "application/json", datestring, curlurl)
headers = {
'Date' : datestring,
'Content-Type' : 'application/json',
'jaeger-debug-id': jaegertoken,
'Content-MD5' : md5,
        'Authorization' : "KiipV2 {}:{}".format("3b46e5f42299f1697193bb843ed8dbf4", signature)
}
    r = requests.post(curlurl, data=data, headers=headers)
    return r.json(), jaegertoken
| null |
app/trace.py
|
trace.py
|
py
| 2,291 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "hmac.new",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 71,
"usage_type": "call"
}
] |
500187938
|
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals
import os
import json
import requests
from time import sleep
from kbot.log import Log
from kbot.message import Message
from kbot.book.common import BookSearchQuery
class CalilService(object):
CALIL_BASE_URL = "http://api.calil.jp/check"
@classmethod
def get_one_book(cls, query):
systemid1 = "Tokyo_Nerima"
systemid2 = "Special_Jil"
query.set("systemid", systemid1 + "," + systemid2)
json_data = CalilService.__request(CalilQuery.adjust_first_query(query))
json_data = CalilService.__polling(json_data)
book1 = CalilService.__get_one_book_from_json(json_data, query.get("isbn"), systemid1)
# book2 = CalilService.__get_one_book_from_json(json_data, query.get('isbn'), systemid2)
return book1
@classmethod
def __polling(cls, json_data):
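        # The Calil API signals pending lookups with continue == 1; keep
        # re-polling with the returned session id (every 2s) until done.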
while json_data["continue"] == 1:
sleep(2)
query = BookSearchQuery()
query.set("session", json_data["session"])
json_data = CalilService.__polling_request(CalilQuery.adjust_next_query(query))
return json_data
@classmethod
def __get_one_book_from_json(cls, json_data, isbn, systemid):
reserve_info = json_data.get("books").get(isbn).get(systemid)
status = reserve_info.get("status")
if status != "OK" and status != "Cache":
return CalilBook(isbn, {})
return CalilBook(isbn, reserve_info)
@classmethod
def __request(cls, query):
response = CalilService.__request_sub(query)
        json_data = response.json()  # TODO: null check
Log.info(json.dumps(json_data, sort_keys=True, indent=4))
return json_data
@classmethod
def __polling_request(cls, query):
response = CalilService.__request_sub(query)
        # Responses after the first request are always returned as JSONP
json_string = response.text[9:-2]
json_data = json.loads(json_string)
Log.info(json.dumps(json_data, sort_keys=True, indent=4))
return json_data
@classmethod
def __request_sub(cls, query):
response = requests.get(CalilService.CALIL_BASE_URL, params=query)
return response
class CalilQuery(object):
@classmethod
def __set_common(cls, query):
query.set("appkey", os.environ["CALIL_APP_KEY"])
query.set("format", "json")
return query
@classmethod
def adjust_first_query(cls, query):
query = CalilQuery.__set_common(query)
query.set("callback", "no")
return query.dict()
@classmethod
def adjust_next_query(cls, query):
query = CalilQuery.__set_common(query)
return query.dict()
class CalilBook(object):
def __init__(self, isbn, json):
self.isbn = isbn
self.reserveurl = json.get("reserveurl", "")
self.libkey = json.get("libkey", "")
self.id = self.reserveurl.split("=")[-1]
self.kbot_reserve_url = (
"https://" + os.environ["MY_SERVER_NAME"] + "/kbot/library/reserve?book_id="
)
self.log()
def log(self):
Log.info("isbn : " + self.isbn)
Log.info("reserveurl : " + self.reserveurl)
Log.info("libkey : " + str(self.libkey))
Log.info("id : " + self.id)
Log.info("kbot_reserve_url : " + self.kbot_reserve_url)
def get_text_message(self):
return Message.create_text_by_object(self)
| null |
kbot/book/calil.py
|
calil.py
|
py
| 3,489 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "kbot.book.common.BookSearchQuery",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "json.get",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "json.get",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "kbot.log.Log.info",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "kbot.log.Log",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "kbot.message.Message.create_text_by_object",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "kbot.message.Message",
"line_number": 108,
"usage_type": "name"
}
] |
410339514
|
def numero_materiales(elemento,material,nombre):
from lxml import etree
lista = []
for elem in elemento:
lista.append(elem.text)
lista_materiales = []
lista_muro = []
lista_tabiques = []
lista_cubierta = []
lista_interno = []
lista_terreno = []
lista_medianera = []
for elem in material:
lista_materiales.append(elem.text)
lista_muro.append(lista_materiales[0:5])
lista_tabiques.append(lista_materiales[5:8])
lista_cubierta.append(lista_materiales[8:14])
lista_interno.append(lista_materiales[14:19])
lista_terreno.append(lista_materiales[19:23])
lista_medianera.append(lista_materiales[23::])
if nombre == "Muro Exterior":
return(len(lista_muro[0]))
elif nombre == "Tabiques":
return(len(lista_tabiques[0]))
elif nombre == "Cubierta":
return(len(lista_cubierta[0]))
elif nombre == "Forjado Interno":
return(len(lista_interno[0]))
elif nombre == "Forjado Terreno":
return(len(lista_terreno[0]))
elif nombre == "Medianera":
return(len(lista_medianera[0]))
else:
return("Error : Ese elemento no existe")
def conductividad_material(conductividad,lista_conductividad):
from lxml import etree
if conductividad in lista_conductividad:
posicion = lista_conductividad.index(conductividad)
return(print("Nombre del material: ",lista_conductividad[int(posicion - 1)]))
else:
return(print("No existe la Conductividad Térmica"))
def espacios(nombre_espacio):
from lxml import etree
residencial = etree.parse('EjemploResidencial.xml')
espacios = residencial.findall('CondicionesFuncionamientoyOcupacion/Espacio')
lista_espacios = []
for elem in espacios:
lista_espacios.append(elem.find("Nombre").text)
lista_espacios.append(elem.find("Superficie").text)
lista_espacios.append(elem.find("NivelDeAcondicionamiento").text)
lista_espacios.append(elem.find("PerfilDeUso").text)
if nombre_espacio == lista_espacios[0]:
return(print("Superficie:",lista_espacios[1],".NivelDeAcondicionamiento:",lista_espacios[2],".PerfilDeUso",lista_espacios[3]))
elif nombre_espacio == lista_espacios[4]:
return(print("Superficie:",lista_espacios[5],".NivelDeAcondicionamiento:",lista_espacios[6],".PerfilDeUso",lista_espacios[7]))
elif nombre_espacio == lista_espacios[8]:
return(print("Superficie:",lista_espacios[9],".NivelDeAcondicionamiento:",lista_espacios[10],"PerfilDeUso",lista_espacios[11]))
elif nombre_espacio == lista_espacios[12]:
return(print("Superficie:",lista_espacios[13],"NivelDeAcondicionamiento:",lista_espacios[14],"PerfilDeUso",lista_espacios[15]))
elif nombre_espacio == lista_espacios[16]:
return(print("Superficie:",lista_espacios[17],"NivelDeAcondicionamiento:",lista_espacios[18],"PerfilDeUso",lista_espacios[19]))
else:
return(print("Error no existe el espacio introducido"))
def densidad(nombre_material):
from lxml import etree
residencial = etree.parse('EjemploResidencial.xml')
materiales = residencial.findall("DatosEnvolventeTermica/CerramientosOpacos/Elemento/Capas/Capa")
lista_densidad = []
for elem in materiales:
lista_densidad.append(elem.find("Material").text)
lista_densidad.append(elem.find("Densidad").text)
if nombre_material in lista_densidad:
posicion = lista_densidad.index(nombre_material)
return(print("La densidad es:",lista_densidad[posicion + 1]))
else:
return(print("Error ese material no existe"))
| null |
Funciones_xml.py
|
Funciones_xml.py
|
py
| 3,322 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "lxml.etree.parse",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "lxml.etree.parse",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 68,
"usage_type": "name"
}
] |
498276825
|
import pandas as pd
from datetime import date
from main.dataframe_ops import transform_df
class SimulationA:
tx_fee = 0.65
def __init__(self, principal):
self.principal = principal
self.principal_init = principal
self.owned_shares = 0
self.owned_shares_hodling = 0
def run_strat(self,symbol0,symbol1,buying_threshold):
print(symbol0 + "-" + symbol1)
# Prepare data
df = pd.read_csv('../main/922.csv', index_col='Date', parse_dates=True)
df_symbol0 = df[[symbol0]]
col_list = [symbol0, symbol1]
df = df[col_list]
# Get price dataframes
df_prices = df
# Calculation when HODLING
most_recent_price = df_prices.iloc[1][symbol0]
oldest_price = df_prices.iloc[-2][symbol0]
self.owned_shares_hodling = int(self.principal_init/oldest_price)
# Transform data
df_transformed = transform_df(df,symbol0)
# Start strat
worth_strategy = []
worth_hodling_array = []
prev = 0
for index, row in df_transformed.iterrows():
#data needed for strat
d_truncated = date(index.year, index.month, index.day)
increase_symbol1 = "%.2f" %row[symbol1]
price_symbol0 = "%.2f" % df_prices.loc[d_truncated][symbol0]
if float(increase_symbol1) > buying_threshold :
#buy all we can with what we have - AT CURRENT DAY
if float(self.principal) > float(price_symbol0):
self.buy_max_shares(price_symbol0)
else:
# price_symbol0_t_plus1d = "%.2f" % df_prices.shift(1).loc[d_truncated][symbol0]
# print("price_symbol0_t_plus1d : " + str(price_symbol0_t_plus1d))
self.sell_all_shares(price_symbol0)
#prev = increase_symbol1
worth_strategy.insert(0, self.principal)
#for comparaison
worth_hodling = self.owned_shares_hodling * float(price_symbol0)
worth_hodling_array.insert(0, worth_hodling)
#last sell all
self.sell_all_shares(most_recent_price)
profit_strat = float(self.principal)-float(self.principal_init)
#Graph
df_symbol0.drop(df.tail(1).index, inplace=True) # drop last n rows
df_symbol0.drop(df.head(1).index, inplace=True) # drop first n rows
df_symbol0['STRAT'] = worth_strategy
df_symbol0['HODLING'] = worth_hodling_array
# df_symbol0.plot()
# plt.interactive(False)
# plt.show(block=True)
profit_hodling = worth_hodling_array[0] - self.principal_init
diff_strat_hodl = profit_strat - profit_hodling
print("PERFORMANCE : " + str("%.2f" % diff_strat_hodl))
return diff_strat_hodl
def sell_all_shares(self,price):
if int(self.owned_shares) > 0:
total_sell_order = float(price) * self.owned_shares
total_sell_order -= SimulationA.tx_fee # selling comission
self.owned_shares = 0
self.principal = float(self.principal) + total_sell_order
def buy_max_shares(self,price):
qty = int(float(self.principal) / float(price))
total_buy_order = float(price) * qty
total_buy_order -= SimulationA.tx_fee # buying comission
self.owned_shares = self.owned_shares + qty
self.principal = float(self.principal) - total_buy_order
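# Minimal usage sketch (symbol names are placeholders; ../main/922.csv must
# contain matching price columns):
#   sim = SimulationA(principal=10000)
#   sim.run_strat('SYM0', 'SYM1', buying_threshold=0.5)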
| null |
simulator/SimulationA.py
|
SimulationA.py
|
py
| 3,495 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "main.dataframe_ops.transform_df",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 49,
"usage_type": "call"
}
] |
610380581
|
from django.shortcuts import render,redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm,UserCreationForm
from django.contrib.auth import authenticate,login
from django.contrib.auth.models import User
from django.core.paginator import Paginator,PageNotAnInteger,EmptyPage
from django.http import HttpResponse,StreamingHttpResponse
from sanliuyunapp.form import registerForm,loginForm, ArticleForm,uploadArtForm,addForm
from sanliuyunapp.models import Person, Article
from django.core.exceptions import ObjectDoesNotExist,ValidationError
def indexView(request):
context = {}
return render(request,'index.html',context)
def uploadView(request):
context = {}
if request.method == 'GET':
        form = uploadArtForm()
if request.method == 'POST':
form = uploadArtForm(request.POST,request.FILES)
if form.is_valid():
user_id = request.user.user_profile.id
upload_art = request.FILES['uploadArt']
headline = upload_art.name.split('.')[0]
art = Article(headline = headline,local_article=upload_art)
art.save()
art.author.add(user_id)
art.save()
try:
target = "sanliuyunapp/static/uploads/localArt/{}".format(upload_art.name)
with open(target,'r') as fs:
text = fs.read()
except FileNotFoundError:
return HttpResponse('文件名不能包含空格,点等特殊字符')
fs.close()
art.text = text
art.save()
return redirect(to='desktop')
context['form'] = form
return render(request,'upload.html',context)
def downloadArtView(request,art_name):
context = {}
user_id = request.user.id
headline = art_name
art = Article.objects.get(id = art_name)
content = art.text
target = "{}.doc".format(headline)
with open(target,'w') as fs:
for chunk in content:
fs.write(chunk)
fs.close()
    return HttpResponse('已经导出到项目根目录')
@login_required(redirect_field_name='login',login_url='login')
def desktopView(request):
context = {}
user_id = request.user.user_profile.id
print(user_id)
art = Article.objects.filter(author = user_id).order_by('-save_time')
page_robot = Paginator(art,15)
page_num = request.GET.get('page')
if page_num:
page = int(page_num)
else:
        page = 1  # default to the first page; a request without ?page would otherwise error
try:
art = page_robot.page(page_num)
except EmptyPage:
art = page_robot.page(page_robot.num_pages)
except PageNotAnInteger:
art = page_robot.page(1)
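    # Build a window of up to five page links around the current page,
    # clamped at the first and last pages.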
if page == int(page_robot.num_pages):
page_range = page_robot.page_range[page-5:page_robot.num_pages]
elif page == int(page_robot.num_pages)-1:
page_range = page_robot.page_range[page-4:page_robot.num_pages+1]
elif page <= 2:
page_range = page_robot.page_range[0:5-page+page]
else:
page_range = page_robot.page_range[page-3 :page+2]
context['art'] =art
context['page_range'] =page_range
return render(request,'desktop.html',context)
def loginView(request):
context={}
if request.method == 'GET':
        form = loginForm()
if request.method == 'POST':
form = loginForm(request.POST)
if form.is_valid():
inputName = form.cleaned_data['inputName']
password = form.cleaned_data['password']
user = authenticate(username =inputName,password = password)
if user:
login(request,user)
return redirect(to = 'index')
else:
person = Person.objects.get(email_address = inputName)
inputName = person.nickname
user = authenticate(username =inputName,password = password)
login(request,user)
return redirect(to = 'index')
return redirect(to = 'index')
context['form']= form
return render(request,'login.html',context)
def registerView(request):
context={}
if request.method == 'GET':
form = registerForm
if request.method == 'POST':
form = registerForm(request.POST)
if form.is_valid():
password2 = form.cleaned_data['password2']
nickname = form.cleaned_data['nickname']
email_address = form.cleaned_data['email_address']
new_Person = User.objects.create_user(username=nickname,email=email_address,password = password2)
new_Person.save()
person = Person(belong_to= new_Person,nickname=nickname,email_address= email_address)
person.save()
user = authenticate(username =nickname,password = password2)
login(request,user)
return redirect(to = 'index')
context['form']= form
return render(request,'register.html',context)
@login_required(redirect_field_name='login',login_url='login')
def deleteArtView(request,art_name):
context = {}
try:
art_del = Article.objects.get(id = art_name)
if request.method == 'GET':
user_id = request.user.user_profile.id
art = Article.objects.filter(author = user_id).order_by('-save_time')
page_robot = Paginator(art,15)
page_num = request.GET.get('page')
if page_num:
page = int(page_num)
else:
                page = 1  # default to page 1; entering without a page index would otherwise raise an error
try:
art = page_robot.page(page_num)
except EmptyPage:
art = page_robot.page(page_robot.num_pages)
except PageNotAnInteger:
art = page_robot.page(1)
if page == int(page_robot.num_pages):
page_range = page_robot.page_range[page-5:page_robot.num_pages]
elif page == int(page_robot.num_pages)-1:
page_range = page_robot.page_range[page-4:page_robot.num_pages+1]
elif page <= 2:
                page_range = page_robot.page_range[0:5]  # 5 - page + page is always 5
else:
page_range = page_robot.page_range[page-3 :page+2]
context['art'] =art
context['page_range'] =page_range
if request.method == 'POST':
art_del.delete()
return redirect('desktop')
except:
        return HttpResponse('The page has already been deleted~')
return render(request,'deleteArt.html',context)
def deleteResultView(request):
context = {}
return render(request,'deleteResult.html',context)
@login_required(redirect_field_name='login',login_url='login')
def editorView(request,art_name):
context = {}
if request.method == 'GET':
art = Article.objects.get(id = art_name)
form = ArticleForm(
initial={'headline':art.headline,'content':art.text}
)
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
# headline = form.cleaned_data['headline']
# content = form.cleaned_data['content']
# art = artold(headline=headline, text=content)
art = Article.objects.get(id = art_name)
art.headline = form.cleaned_data['headline']
art.text = form.cleaned_data['content']
art.save()
user_id = request.user.user_profile.id
art.author.add(user_id)
art.save()
return redirect(to='editorAdd',art_name=art_name)
article = Article.objects.get(id=art_name)
context['form'] = form
context['article'] = article
return render(request, 'editoring.html', context)
# def editorArtView(request,art_name):
# context = {}
# if request.method == 'GET':
# art = Article.objects.get(id = art_name)
# form = ArticleForm(
# initial={'headline':art.headline,'content':art.text}
# )
# if request.method == 'POST':
# form = ArticleForm(request.POST)
# if form.is_valid():
# headline = form.cleaned_data['headline']
# content = form.cleaned_data['content']
# art = Article(headline=headline, text=content)
# art.save()
# user_id = request.user.user_profile.id
# art.author.add(user_id)
# art.save()
# return redirect('desktop')
# context['form']=form
# return render(request,'editoring.html',context)
@login_required(redirect_field_name='login',login_url='login')
def editorNewView(request):
context = {}
if request.method == 'GET':
form = ArticleForm
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
headline = form.cleaned_data['headline']
content = form.cleaned_data['content']
art = Article(headline=headline, text=content)
art.save()
user_id = request.user.user_profile.id
art.author.add(user_id)
art.save()
a = art.id
return redirect(to='editorAdd',art_name=a)
# article = Article.objects.get(id=art_name)
context['form'] = form
# context['article'] = article
return render(request, 'editor_new.html', context)
@login_required(redirect_field_name='login',login_url='login')
def editorAddView(request,art_name):
context = {}
if request.method == 'GET':
form = addForm
art = Article.objects.get(id = art_name)
if request.method == 'POST':
form = addForm(request.POST)
art = Article.objects.get(id = art_name)
if form.is_valid():
adduser = form.cleaned_data['addName']
try:
user_judge1 = Person.objects.get(nickname = adduser)
if user_judge1:
user_id = user_judge1.id
art.author.add(user_id)
art.save()
except:
try:
user_judge2 = Person.objects.get(email_address = adduser)
if user_judge2:
user_id = user_judge2.id
art.author.add(user_id)
art.save()
except:
                    return HttpResponse('Username/email does not exist')
# user_id = request.user.id
context['form'] = form
context['art'] =art
return render(request, 'editor_add.html', context)
| null |
sanliuyunsite/sanliuyunapp/views.py
|
views.py
|
py
| 10,861 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.form.uploadArtForm",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.form.uploadArtForm",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.filter",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.form.loginForm",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.form.loginForm",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Person.objects.get",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Person.objects",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Person",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.form.registerForm",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.form.registerForm",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.models.Person",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.models.Article.objects.filter",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.form.ArticleForm",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.form.ArticleForm",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.form.ArticleForm",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.form.ArticleForm",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.form.addForm",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.form.addForm",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects.get",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Article.objects",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Article",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.models.Person.objects.get",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Person.objects",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Person",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "sanliuyunapp.models.Person.objects.get",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "sanliuyunapp.models.Person.objects",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "sanliuyunapp.models.Person",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 256,
"usage_type": "call"
}
] |
384755586
|
import numpy as np
import matplotlib.pyplot as plt
from ZachsPackage import Display as D
L = 0.15 # Fin length
Tb = 500 # Base temperature K
k = 16.0 # Thermal conductivity W/mK
Ac = 1.67e-4 # Cross sectional area m^2
P = 0.066 # Perimeter length m
Tf = 300 # Free stream temperature K
h = 25 # Heat transfer coefficient W/m^2
TL = 350 # Fixed tip temperature K
dx = 0.03 # Step size
m = h*P/(k*Ac)  # NB: in standard fin notation this is m**2 = h*P/(k*Ac); the Func* helpers take the sqrt themselves
"""
Zachary Preator
Code Mastery 10/10
Numerical Analysis 8/10
Technical Writing 8/10
"""
def dTdUdx(x, var):
""" ODE represented as two first order ODEs"""
T = var[0]
U = var[1]
dTdx = U
dUdx = h*P/k/Ac*(T-Tf)
return np.array([dTdx,dUdx])
def TemperatureFDM(T1, T2):
""" Represents the forward differencing scheme solved for T"""
    return (T1 + T2 + dx**2*m*Tf)/(2 + dx**2*m)
def RK4(f,t0,y0,tf,h):
""" Runge-Kutta method"""
t = np.arange(t0,tf+h,h)
n = len(t)
m = len(y0)
y = np.zeros([n,m])
y[0] = y0
for i in range(n-1):
k1 = f(t[i],y[i])
k2 = f(t[i]+.5*h,y[i]+k1*.5*h)
k3 = f(t[i]+.5*h,y[i]+k2*.5*h)
k4 = f(t[i]+h,y[i]+k3*h)
y[i+1] = y[i] + h/6*(k1 + 2*k2 + 2*k3 + k4)
return t, y.transpose()
def Forward2(f, x, xPoints):
""" Computes the second order forward first derivative """
L = []
for i in range(len(x)-2):
c = (-f(x[i+2])+4*f(x[i+1])-3*f(x[i]))/(x[i+2]-x[i])
L.append(c)
indexes = [np.argwhere(x == i)[0][0] for i in xPoints]
L.append(L[-1])
derivs = [L[i] for i in indexes]
return derivs
def FuncTempFixedT(x):
""" Function for temperature with fixed temperature TL"""
m = np.sqrt(h*P/(k*Ac))
    # Find the constants thetaL and thetab
thL = TL-Tf
thb = Tb-Tf
return thL*np.sinh(m*x)/np.sinh(m*L)+thb*np.sinh(m*(L-x))/np.sinh(m*L) + Tf
def FuncGradientFixedT(x):
""" Function for temperature gradient"""
m = np.sqrt(h*P/(k*Ac))
thL = TL-Tf
thb = Tb-Tf
return m*k*Ac/np.sinh(m*L)*(thb*np.cosh(m*(L-x)) - thL*np.cosh(m*x))
def FuncTempAdiabatic(x):
""" Function for tempearture with adiabatic tip temperature"""
m = np.sqrt(h*P/(k*Ac))
thb = Tb-Tf
return thb*(np.cosh(m*(L-x))/(np.cosh(m*L)))+Tf
def FuncGradientAdiabatic(x):
""" Function for temp gradient with adiabatic tip temperature"""
m = np.sqrt(h*P/(k*Ac))
thb = Tb-Tf
return m*k*Ac*thb*np.sinh(m*(L-x))/np.cosh(m*L)
def HTR(U):
"""Calculates heat transfer rate given T' """
return -k*Ac*U
def ProduceLatexTable(columnHeadings, data, title='', label=''):
""" Prints latex table"""
    # Adds a caption and begins the table
caption = '{' + title + '}'
print('\\begin{table}')
print(' \centering')
print(' \caption{0}'.format(caption))
# Sets the columns in \begin{tabular} (cccc...)
cols = ''
for i in range(len(data[0])):
cols += 'c'
print(' \\begin{tabular}{@{}', cols, '@{}}\\toprule')
# Sets the column headings
line = ''
for i in range(len(columnHeadings)-1):
line += columnHeadings[i] + ' & '
line += columnHeadings[-1]
print(' ', line, '\\\\ \midrule')
# Prints the rows with the data provided
for i in range(len(data)):
row = ''
for j in range(len(columnHeadings)-1):
# row += str(data[i][j]) + ' & ' # Use this line for the raw input
row += '{0:4.3E}'.format(data[i][j]) + ' & ' # Toggle this line to format numbers
# row += str(data[i][-1]) # Raw input
row += '{0:4.3E}'.format(data[i][-1]) # Format numbers
print(' ', row, '\\\\')
# Ends the table schema
print(' \\bottomrule')
print(' \end{tabular}')
label = '{' + label + '}'
print(' \label{0}'.format(label))
print('\end{table}')
def Display(data, lineType, label='', done = False, xLabel=None, yLabel=None, plotLabel=None, f=None, log=False):
""" Displays data as [x, y]"""
t = np.arange(0, 4, 0.01)
plt.plot(data[0], data[1], lineType, label=label)
    if done:
        if f is not None:
            plt.plot(t, f(t, 1), 'k', label='Exact')
if log:
plt.xscale("log")
plt.yscale("log")
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('{0}.pdf'.format(plotLabel))
# plt.show()
def ShootingMethod(guess1, guess2, desiredVal, der):
""" Performs the shooting method with runge-kutta.
desiredVal = value to shoot for
der = order of the dependent variable sought
FOR EXAMPLE: To find what input guess for T' will
give a value of T = TL, then my input would be:
ShootingMethod(guess1, guess2, TL, 1). If I wanted
to get the value of T' = 0 then: ShootingMethod(.., 0, 2)"""
i = der - 1
x1, var1 = RK4(dTdUdx, 0, [Tb,guess1], L, dx)
x2, var2 = RK4(dTdUdx, 0, [Tb,guess2], L, dx)
guess3 = (desiredVal-var1[i,-1])/(var2[i,-1]-var1[i,-1])*(guess2-guess1) + guess1
x3, var3 = RK4(dTdUdx, 0, [Tb, guess3], L, dx)
return x3, var3, guess3
def Qvalues(T):
"""Calculates q values using finite difference values for T"""
dT = [(-3*T[0] + 4*T[1] - T[2])/2/dx]
for i in range(1,5): #For q values between Tb and Tl
dT.append((T[i+1]-T[i-1])/2/dx)
dT.append((3*T[-1] - 4*T[-2] + T[-3])/2/dx)
q = [-k*Ac*i for i in dT]
return q
def GaussSeidelFDMFixedTip():
""" Uses gaussSeidel method to solve for
the temperature across the fin given beginning
and end boundary conditions of temperatures"""
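    # Sweep the interior nodes repeatedly, reusing freshly updated neighbor
    # values within each sweep, until the largest relative change is below MaxError.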
MaxError = 1E-6
error = 1
T = np.arange(0, L+dx, dx)
T[0] = Tb
T[-1] = TL
while error>MaxError:
TOld = np.copy(T)
for i in range(1, len(TOld)-1):
T[i] = TemperatureFDM(T[i-1], T[i+1])
error = np.max(abs(T - TOld)/T)
return T
def GaussSeidelFDMAdiabatic():
""" Uses Gauss Seidel to find the temperatures
across the fin given the beginning temperature
and end rate of heat transfer (0)"""
MaxError = 1E-6
error = 1
T = np.arange(0, L+dx, dx)
T[0] = Tb
    T[-1] = 310  # note: the tip is pinned at 310 K here rather than enforcing dT/dx = 0
while error>MaxError:
TOld = np.copy(T)
for i in range(1, len(TOld)-1):
T[i] = TemperatureFDM(T[i-1], T[i+1])
error = np.max(abs(T - TOld)/T)
return T
def FDM():
"""Finds temp gradient using finite difference method"""
c = -2 - m*dx**2
A = np.array([
[1,0,0,0,0,0],
[1,c,1,0,0,0],
[0,1,c,1,0,0],
[0,0,1,c,1,0],
[0,0,0,1,c,1],
[0,0,0,0,-1,1]
])
    b = np.array([[Tb],
                  [-m*Tf*dx**2],
                  [-m*Tf*dx**2],
                  [-m*Tf*dx**2],
                  [-m*Tf*dx**2],
                  [0]])  # adiabatic tip row (-T5 + T6 = 0) has a zero right-hand side
return np.linalg.solve(A,b)
def main():
""" Calls all functions and organized to see
it all neatly:)"""
tFixedT, valFixedT, guessFixedT = ShootingMethod(0, -1000, TL, 1) # We want the Temperature to be TL @ L
tAdiabatic, valAdiabatic, guessAdiabatic = ShootingMethod(0, -1000, 0, 2) # We want the derivative to be 0 @ L
TFDM = GaussSeidelFDMFixedTip()
QFDM = Qvalues(TFDM)
TFDMA = GaussSeidelFDMAdiabatic()
QFDMA = Qvalues(TFDMA)
# print('2.')
# print(' (a) @ x = 0, T\' = {0:5.4f} @ x = {1:4.2f}, T = {2:5.4f}'.format(guessFixedT, L, valFixedT[0][-1]))
# print(' (b) @ x = 0, T\' = {0:5.4f} @ x = {1:4.2f}, T\' = {2:5.4f}'.format(guessAdiabatic, L, valAdiabatic[1][-1]))
x = np.arange(0,L,0.0001)
q = FuncGradientFixedT(x)
T = FuncTempFixedT(x)
qa = FuncGradientAdiabatic(x)
Ta = FuncTempAdiabatic(x)
# # Plot of temperature using fixed temperature guess
# plt.figure(figsize=(5, 3))
# Display([x, T], '-', label='Analytical')
# Display([tFixedT, valFixedT[0]], '--', label='Numerical', done=True, xLabel='Position', yLabel='Temperature', plotLabel='Temperature(x)')
# # Plot of temp gradient using fixed temperature guess
# plt.figure(figsize=(5, 3))
# Display([x, q], '-', label='Analytical')
# Display([tFixedT, HTR(valFixedT[1])], '--', label='Numerical', done=True, xLabel='Position', yLabel='Temperature', plotLabel='Temp Gradient(x)')
# # Plot of temperature using adiabatic guess
# plt.figure(figsize=(5, 3))
# Display([x, Ta], '-', label='Analytical')
# Display([tAdiabatic, valAdiabatic[0]], '--', label='Numerical', done=True, xLabel='Position', yLabel='Temperature', plotLabel='TemperatureA(x)')
# # Plot of temp gradient using adiabatic guess
# plt.figure(figsize=(5, 3))
# Display([x, qa], '-', label='Analytical')
# Display([tAdiabatic, HTR(valAdiabatic[1])], '--', label='Numerical', done=True, xLabel='Position', yLabel='Temperature', plotLabel='Temp GradientA(x)')
# # # FINITE DIFFERENCE METHOD
# # Fixed Temp
# Finite Difference Method plotted with RK4 method for temperature (T)
# plt.figure(figsize=(5, 3))
# Display([x, T], '-', label='Analytical')
# Display([tFixedT, TFDM], ':', label='Numerical (FDM)')
# Display([tFixedT, valFixedT[0]], '--', label='Numerical (RK4)', done=True, xLabel='Position', yLabel='Temperature', plotLabel='Temp Comparison')
# # Finite Difference method compared with RK4 for gradient (q)
# plt.figure(figsize=(5, 3))
# Display([x, q], '-', label='Analytical')
# Display([tFixedT, QFDM], ':', label='Numerical (FDM)')
# Display([tFixedT, HTR(valFixedT[1])], '--', label='Numerical (RK4)', done=True, xLabel='Position', yLabel='Temperature Gradient', plotLabel='Gradient Comparison')
# # Adiabatic Tip (T' = 0)
#FDM with RK4 for temperature (T)
plt.figure(figsize=(5, 3))
Display([x, Ta], '-', label='Analytical')
Display([tFixedT, TFDMA], ':', label='Numerical (FDM)')
Display([tAdiabatic, valAdiabatic[0]], '--', label='Numerical (RK4)', done=True, xLabel='Position', yLabel='Temperature', plotLabel='Temp Comparison A')
# FDM with RK4 for temperature gradient (q)
plt.figure(figsize=(5, 3))
Display([x, qa], '-', label='Analytical')
Display([tFixedT, QFDMA], ':', label='Numerical (FDM)')
Display([tAdiabatic, HTR(valAdiabatic[1])], '--', label='Numerical (RK4)', done=True, xLabel='Position', yLabel='Temperature Gradient', plotLabel='Gradient Comparison A')
# Creating lists of all errors
TerrFixed = [abs((valFixedT[0][i] - FuncTempFixedT(tFixedT[i]))/FuncTempFixedT(tFixedT[i])) for i in range(len(tFixedT))]
qerrFixed = [abs((HTR(valFixedT[1][i]) - FuncGradientFixedT(tFixedT[i]))/FuncGradientFixedT(tFixedT[i])) for i in range(len(tFixedT))]
TerrAdiabatic = [abs((valAdiabatic[0][i] - FuncTempAdiabatic(tAdiabatic[i]))/FuncTempAdiabatic(tAdiabatic[i])) for i in range(len(tAdiabatic))]
qerrAdiabatic = [abs((HTR(valAdiabatic[1][i]) - FuncGradientAdiabatic(tAdiabatic[i]))/FuncGradientAdiabatic(tAdiabatic[i])) for i in range(len(tAdiabatic))]
TerrFDM = [abs((TFDM[i] - FuncTempFixedT(tFixedT[i]))/FuncTempFixedT(tFixedT[i])) for i in range(len(tFixedT))]
qerrFDM = [abs((QFDM[i] - FuncGradientFixedT(tFixedT[i]))/FuncGradientFixedT(tFixedT[i])) for i in range(len(tFixedT))]
TerrFDMA = [abs((TFDMA[i] - FuncTempAdiabatic(tAdiabatic[i]))/FuncTempAdiabatic(tAdiabatic[i])) for i in range(len(tAdiabatic))]
qerrFDMA = [abs((QFDMA[i] - FuncGradientAdiabatic(tAdiabatic[i]))/FuncGradientAdiabatic(tAdiabatic[i])) for i in range(len(tAdiabatic))]
# Putting the error data into a latex table
table = []
[table.append([TerrFixed[i], qerrFixed[i], TerrFDM[i], qerrFDM[i]]) for i in range(len(tFixedT))]
ProduceLatexTable([r'$e_T$', r'$e_q$', r'$e_T$', r'$e_q$'], table)
tableA = []
[tableA.append([TerrAdiabatic[i], qerrAdiabatic[i], TerrFDMA[i], qerrFDMA[i]]) for i in range(len(tAdiabatic))]
ProduceLatexTable([r'$e_T$', r'$e_q$', r'$e_T$', r'$e_q$'], tableA)
main()
| null |
ME342NumAnalysis/Unit5/Assignment58.py
|
Assignment58.py
|
py
| 12,087 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sinh",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.sinh",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.cosh",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.cosh",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.sinh",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.cosh",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.solve",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 289,
"usage_type": "name"
}
] |
563058881
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import serial
import time
import os
import stat
try:
import meterbus
except ImportError:
import sys
sys.path.append('../')
import meterbus
def ping_address(ser, address, retries=5):
for i in range(0, retries + 1):
meterbus.send_ping_frame(ser, address)
try:
frame = meterbus.load(meterbus.recv_frame(ser, 1))
if isinstance(frame, meterbus.TelegramACK):
return True
except meterbus.MBusFrameDecodeError:
pass
return False
def setG4modern(ser, address):
meterbus.send_request_setLUG_G4_readout_control(ser, address, 0x00)
try:
frame = meterbus.load(meterbus.recv_frame(ser, 1))
if isinstance(frame, meterbus.TelegramACK):
return True
except meterbus.MBusFrameDecodeError:
pass
return False
def do_reg_file(args):
with open(args.device, 'rb') as f:
frame = meterbus.load(f.read())
if frame is not None:
print(frame.to_JSON())
def do_char_dev(args):
address = None
try:
address = int(args.address)
if not (0 <= address <= 254):
address = args.address
except ValueError:
address = args.address.upper()
try:
#vib_to_show = ['14:0','59:1','59:0','89:0', '93:0', '255.34:0']
#vib_to_show = ['14:0','59:0','89:0', '93:0']
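        # Keys are (VIB byte tuple, DIB function type, storage number);
        # only records whose key matches an entry here are kept in the output.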
filter = {((14,), 0, 0): "one",
#((14,), 0, 1): "one_b",
((59,), 0, 0): "FLOW",
((62,), 0, 0): "FLOW",
((89,), 0, 0): "FLOW_TEMPERATURE",
((91,), 0, 0): "FLOW_TEMPERATURE",
((93,), 0, 0): "RETURN_TEMPERATURE",
((95,), 0, 0): "RETURN_TEMPERATURE",
((255, 34), 0, 0): "five",
((90,), 0, 0): "FLOW_TEMPERATURE",
((94,), 0, 0): "RETURN_TEMPERATURE",
}
ibt = meterbus.inter_byte_timeout(args.baudrate)
parity = 'E'
if args.monitor: parity = 'N'
with serial.serial_for_url(args.device,
args.baudrate, 8, parity, 1,
inter_byte_timeout=ibt,
timeout=1) as ser:
if meterbus.is_primary_address(address):
if not args.monitor:
ping_address(ser, address, 0)
#ping_address(ser, meterbus.ADDRESS_NETWORK_LAYER, 0)
#setG4modern(ser, address)
#print("Landis+Gyr needs time")
#time.sleep(4)
#print("done")
t_start = time.time()-3
try:
#ser.read(1)
while True:
time.sleep(0.1)
if not args.monitor:
if (time.time() - t_start) <= int(args.sleep):
continue
t_start = time.time()
meterbus.send_request_frame(ser, address)
time.sleep(0.2)
frame = None
#print(ser.inWaiting(), end = ' ')
if ser.inWaiting(): # >= 205:
#ser.read()
framedata = meterbus.recv_frame(ser, meterbus.FRAME_DATA_LENGTH)
print("frame: ",framedata)
if framedata:
frame = meterbus.load(framedata)
if not frame:
continue
records = frame.body.bodyPayload.records
filtered = {"ts": '{:10.0f}'.format(time.time()),
"records": [],
"framedata": framedata.hex(),
}
for record in records:
vib = tuple(record.vib.parts)
func = record.dib.function_type.value
storage_number = record.dib.storage_number
key = (vib, func, storage_number)
if key in filter:
#name = filter.get(key,"value")
filtered['records'].append(record.interpreted)
# print(name)
#record = records[vib]
#print('{:10},{:30}:{}'.format(vib, record['type'], record['value']))
# value = record.value
# if type(value) is int:
# print(' {:8} '.format(value), end='')
# else:
# print(' {:10.8} '.format(value), end='')
#print()
import simplejson as json
print(json.dumps(filtered, sort_keys=True, indent=4, use_decimal=True))
except KeyboardInterrupt:
pass
elif meterbus.is_secondary_address(address):
meterbus.send_select_frame(ser, address)
try:
frame = meterbus.load(meterbus.recv_frame(ser, 1))
except meterbus.MBusFrameDecodeError as e:
frame = e.value
assert isinstance(frame, meterbus.TelegramACK)
frame = None
# ping_address(ser, meterbus.ADDRESS_NETWORK_LAYER, 0)
meterbus.send_request_frame(
ser, meterbus.ADDRESS_NETWORK_LAYER)
time.sleep(0.3)
frame = meterbus.load(
meterbus.recv_frame(ser, meterbus.FRAME_DATA_LENGTH))
if frame is not None:
print(frame.to_JSON())
except serial.serialutil.SerialException as e:
print(e)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Request data over serial M-Bus for devices.')
parser.add_argument('-d', action='store_true',
help='Enable verbose debug')
parser.add_argument('-m', '--monitor', action='store_true',
help='monitor channel, will not send request, listen only')
parser.add_argument('-b', '--baudrate',
type=int, default=2400,
help='Serial bus baudrate')
parser.add_argument('-a', '--address',
type=str, default=meterbus.ADDRESS_BROADCAST_REPLY,
help='Primary or secondary address')
parser.add_argument('-r', '--retries',
type=int, default=5,
help='Number of ping retries for each address')
parser.add_argument('-s', '--sleep',
type=int, default=10,
help='Sleep time')
parser.add_argument('device', type=str, help='Serial device, URI or binary file')
args = parser.parse_args()
meterbus.debug(args.d)
# thread.start_new_thread(key_capture_thread, ())
try:
mode = os.stat(args.device).st_mode
if stat.S_ISREG(mode):
do_reg_file(args)
else:
do_char_dev(args)
except OSError:
do_char_dev(args)
| null |
tools/mbus-serial-continous-request-data.py
|
mbus-serial-continous-request-data.py
|
py
| 7,546 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "meterbus.send_ping_frame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "meterbus.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "meterbus.recv_frame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "meterbus.TelegramACK",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "meterbus.MBusFrameDecodeError",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "meterbus.send_request_setLUG_G4_readout_control",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "meterbus.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "meterbus.recv_frame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "meterbus.TelegramACK",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "meterbus.MBusFrameDecodeError",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "meterbus.load",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "meterbus.inter_byte_timeout",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "serial.serial_for_url",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "meterbus.is_primary_address",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "meterbus.send_request_frame",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "meterbus.recv_frame",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "meterbus.FRAME_DATA_LENGTH",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "meterbus.load",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "meterbus.is_secondary_address",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "meterbus.send_select_frame",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "meterbus.load",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "meterbus.recv_frame",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "meterbus.MBusFrameDecodeError",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "meterbus.TelegramACK",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "meterbus.send_request_frame",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "meterbus.ADDRESS_NETWORK_LAYER",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "meterbus.load",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "meterbus.recv_frame",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "meterbus.FRAME_DATA_LENGTH",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "serial.serialutil",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "meterbus.ADDRESS_BROADCAST_REPLY",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "meterbus.debug",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "os.stat",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "stat.S_ISREG",
"line_number": 207,
"usage_type": "call"
}
] |
79111955
|
# coding: utf-8
# PYTHON IMPORTS
from datetime import datetime
import csv
import re
from types import *
# DJANGO IMPORTS
from django.contrib.admin import helpers
from django.utils.encoding import force_unicode
from django.shortcuts import render_to_response
from django import template
from django.contrib.admin.util import unquote
from django.http import HttpResponse
from django.utils.translation import ugettext as _
def get_csv_export_fields(modeladmin, included):
"""
Return a sequence of tuples which should be included in the export.
"""
model_fields = [f.name for f in modeladmin.model._meta.fields]
#for relation in modeladmin.csv_follow_relations:
# for field in modeladmin.model._meta.get_field_by_name(relation)[0].rel.to._meta.fields:
# fields.append([relation, field.name])
fields = []
for item in modeladmin.list_display:
if item != "action_checkbox":
if csv_get_fieldname(item) in included:
fields.append(item)
elif isinstance(item, FunctionType) and (item.__name__ in included):
fields.append(item)
for f in model_fields:
if (csv_get_fieldname(f) in included) and (csv_get_fieldname(f) not in fields):
fields.append(f)
return fields
def get_csv_export_field_names(modeladmin):
model_fields = [f for f in modeladmin.model._meta.fields]
#for relation in modeladmin.csv_follow_relations:
# for field in modeladmin.model._meta.get_field_by_name(relation)[0].rel.to._meta.fields:
# fields.append([relation, field.name])
fields = []
for item in modeladmin.list_display:
if isinstance(item, FunctionType):
fields.append([item.__name__, item.short_description])
elif item != "action_checkbox":
appended = False
for f in model_fields:
if f.name == item:
fields.append([f.name, f.verbose_name])
appended = True
break
if not appended:
fields.append([item, item])
for f in model_fields:
inserted = False
for item in fields:
if item[0] == f.name:
inserted = True
break
if not inserted:
fields.append([f.name, f.verbose_name])
return fields
def csv_get_export_filename(modeladmin):
ts = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
return '%s_%s_%s_export.csv' % (ts, modeladmin.model._meta.app_label, modeladmin.model._meta.module_name)
def csv_resolve_field(row, fieldname):
if isinstance(fieldname, basestring):
if isinstance(getattr(row, fieldname), MethodType):
return getattr(row, fieldname)()
else:
return getattr(row, fieldname)
elif isinstance(fieldname, FunctionType):
return fieldname(row)
else:
obj = row
for bit in fieldname:
obj = getattr(obj, bit)
return obj
def csv_get_fieldname(field):
if isinstance(field, basestring):
return field
elif isinstance(field, FunctionType):
return field.short_description
return '.'.join(field)
def csv_export_selected(modeladmin, request, queryset):
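    # Django admin actions are two-phase: the first call renders a confirmation
    # page listing the exportable fields; when that form is POSTed back
    # (request.POST.get('post')), the CSV response is generated.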
if request.POST.get('post'):
csv_export_url = '~csv/'
csv_export_dialect = 'excel'
#csv_follow_relations = []
csv_export_fmtparam = {
'delimiter': ';',
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
}
fields = get_csv_export_fields(modeladmin, request.POST.getlist('_fields'))
headers = [csv_get_fieldname(f) for f in fields]
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' % csv_get_export_filename(modeladmin)
writer = csv.writer(response, csv_export_dialect, **csv_export_fmtparam)
writer.writerow(headers)
for row in queryset:
csvrow = [f.encode('utf-8') if isinstance(f, unicode) else f for f in [csv_resolve_field(row, f) for f in fields]]
writer.writerow(csvrow)
return response
fields = get_csv_export_field_names(modeladmin)
list_display = []
for item in modeladmin.list_display:
if isinstance(item, basestring):
list_display.append(item)
else:
list_display.append(item.__name__)
opts = modeladmin.model._meta
app_label = opts.app_label
context = {
"title": _("Export as CSV"),
"object_name": force_unicode(opts.verbose_name),
'queryset': queryset,
"opts": opts,
"root_path": modeladmin.admin_site.root_path,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'fields': fields,
'list_display': list_display,
}
# Display the confirmation page
return render_to_response([
"admin/%s/%s/csv_export_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/csv_export_selected_confirmation.html" % app_label,
"admin/csv_export_selected_confirmation.html"
], context, context_instance=template.RequestContext(request))
csv_export_selected.short_description = "Export selection as CSV"
| null |
3rd_party_apps/grappelli/actions.py
|
actions.py
|
py
| 5,306 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.utils.encoding.force_unicode",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.helpers.ACTION_CHECKBOX_NAME",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin.helpers",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 149,
"usage_type": "name"
}
] |
625020565
|
import unittest
from selenium import webdriver
from pyvirtualdisplay import Display
class SearchTests(unittest.TestCase):
def setUp(self):
#display = Display(visible=0, size=(1920, 1080)).start()
self.driver=webdriver.Chrome()
self.driver.implicitly_wait(30)
self.driver.maximize_window()
def test1(self):
driver=self.driver
driver.get("https://www.ultimateqa.com/simple-html-elements-for-automation/")
gg=self.driver.find_element_by_xpath('//*[@id="post-909"]/div/div[3]/div/div[2]/div[5]/div/table/tbody')
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
| null |
fake.py
|
fake.py
|
py
| 674 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 22,
"usage_type": "call"
}
] |
417891386
|
'''
Loads a video and converts it to a txt file.
The communication spec should probably be revised.
Commands are newline-terminated only:
    frame_start \n
    lazer_off \n
Coordinates are comma separated, one pair per line:
    23,285 \n
    24,284 \n
Basic flow: split the stream on newlines, then split each
coordinate line on the comma to get x and y.
'''
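# Example of the resulting text stream (hypothetical coordinates):
#   frame_start
#   lazer_off
#   23,285
#   lazer_on
#   24,284
#   frame_end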
import numpy as np
import cv2
import socket
import math
# Constants
# Canny edge-detection thresholds
Edge_min = 100
Edge_max = 200
delay = 1
#C:/Users/herom/Desktop/NY_Laser_Project/NY_Laser_Project/test_sample_image/blading.mov
#C:/Users/nishiharay/Desktop/NY_Laser_Project/trunk/output_txt/output.txt
file_path='C:/Users/nishiharay/Videos/max_media_movie/blading.mov'
file_write_path = 'C:/Users/nishiharay/Desktop/NY_Laser_Project/trunk/output_txt/output.txt'
lazer_on_message='lazer_on\n'
lazer_off_message='lazer_off\n'
frame_start_message='frame_start\n'
frame_end_message='frame_end\n'
# Maximum image size after capture.
Image_Scale_X=480
Image_Scale_Y=480
# Resize to the given resolution (total pixel count) while keeping the aspect ratio.
def scale_to_resolation(img, resolation):
h, w = img.shape[:2]
scale = math.sqrt(resolation / (h * w))
return cv2.resize(img, dsize=None, fx=scale, fy=scale)
# Resize to fit within the given width and height while keeping the aspect ratio.
def scale_box(img, width, height):
scale = min(width / img.shape[1], height / img.shape[0])
return cv2.resize(img, dsize=None, fx=scale, fy=scale)
# Open the video file.
cap = cv2.VideoCapture(file_path)
# cv2.VideoCapture does not raise on a missing file,
# so check isOpened() instead of catching cv2.error.
if not cap.isOpened():
    print('cannot open the file')
# Print video information
print('width:',cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print('height:',cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('FPS:',cap.get(cv2.CAP_PROP_FPS))
print('FrameCount:',cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameconut=cap.get(cv2.CAP_PROP_FRAME_COUNT)
print('Movie_time:',cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS))
# Read the first frame
ret, frame = cap.read()
# Video writer variables (disabled)
#fourcc = cv2.VideoWriter_fourcc('m','p','4', 'v')
#out = cv2.VideoWriter('output.mp4',fourcc, cap.get(cv2.CAP_PROP_FPS), (Image_Scale_X,Image_Scale_Y),True)
# Initialize send_message
send_message=''
while(cap.isOpened()):
ret, frame = cap.read()
    # Resize the frame
scaled_img=scale_box(frame,Image_Scale_X,Image_Scale_Y)
    # Blur to suppress noise.
blur=cv2.blur(scaled_img,(3,3))
    # Convert to grayscale
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    # Flip the image vertically
gray=cv2.flip(gray,0)
    # Extract edges with Canny.
edges = cv2.Canny(gray,Edge_min,Edge_max)
    # Binarize
ret,thresh2 = cv2.threshold(edges,127,255,cv2.THRESH_BINARY)
#output_image
output = thresh2
    # Extract the contours
contours, hierarchy = cv2.findContours(thresh2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Draw the contours
cv2.drawContours(frame, contours, -1, (0,255,0), 1)
    # Display the video (disabled)
'''
if ret:
cv2.imshow("Check_Movie", output)
if cv2.waitKey(delay) & 0xFF == ord('q'):
break
else:
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
#'''
    # Write out as video (disabled)
    #out.write(output)
    #''' UDP sending drops the FPS, probably because of the Python-level for loops.
    # Consider a function that reduces the point count, or porting to C++.
send_message=send_message+frame_start_message
for i in range(len(contours)):
first_flag=1
send_message=send_message+lazer_off_message
for j in range(len(contours[i])):
for k in range(len(contours[i][j])):
data=','.join(map(str, contours[i][j][k]))
msg = data #送信する文字列
if first_flag==1:
first_flag=0
elif first_flag==0:
send_message=send_message+lazer_on_message
first_flag=2
send_message=send_message+msg
send_message=send_message+'\n'
send_message=send_message+frame_end_message
#'''
    # Check the current frame number.
print(cap.get(cv2.CAP_PROP_POS_FRAMES))
if (cap.get(cv2.CAP_PROP_POS_FRAMES)==frameconut):
        # Break out of the loop so the output file can be saved.
break
with open(file_write_path, mode='w') as f:
f.write(send_message)
cap.release()
cv2.destroyAllWindows()
| null |
Python/main_movie_fileoutput_kaigyou.py
|
main_movie_fileoutput_kaigyou.py
|
py
| 4,586 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "math.sqrt",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.error",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_COUNT",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_COUNT",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_COUNT",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cv2.blur",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "cv2.flip",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_POS_FRAMES",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_POS_FRAMES",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 141,
"usage_type": "call"
}
] |
478680555
|
# Import the required packages
import smtplib
from email.mime.text import MIMEText
mail_content="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<h1>这是一封python邮件</h1>
</body>
</html>
"""
# MIMEText takes three main arguments:
# 1. the message body
# 2. the MIME subtype ("html" here, since the body is an HTML document)
# 3. the character encoding
msg = MIMEText(mail_content,"html","utf-8")
# Sender email address; a QQ mailbox in this case
from_aadr = "[email protected]"
# Authorization code (app password)
from_pwd = "pfppgamsqjqabehf"
# Recipient address
to_addr = "[email protected]"
# SMTP server address.
# The value differs between mail providers.
# Most providers now require enabling an authorization option before third-party clients can send mail.
# Tencent QQ Mail's SMTP address is smtp.qq.com
smtp_srv = "smtp.qq.com"
# Wrapped in try/except in case anything goes wrong while sending
try:
    # SMTP_SSL takes two arguments:
    # 1. the server address (encoded to bytes here)
    # 2. the port the server accepts connections on
srv = smtplib.SMTP_SSL(smtp_srv.encode(),465)
    # Log in to the sending mailbox
srv.login(from_aadr,from_pwd)
    # Send the mail.
    # sendmail takes three arguments:
    # 1. the sender address
    # 2. the recipient addresses, which must be a list
    # 3. the message content, sent as a string
srv.sendmail(from_aadr,[to_addr],msg.as_string())
srv.quit()
except Exception as e:
print(e)
| null |
net编程/07.py
|
07.py
|
py
| 1,541 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "email.mime.text.MIMEText",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP_SSL",
"line_number": 45,
"usage_type": "call"
}
] |
527876114
|
import cv2
import numpy as np
import pano_stitcher as ps
# Load source images
p1 = cv2.imread('my_panos/src/part1.jpg')
p2 = cv2.imread('my_panos/src/part2.jpg')
p3 = cv2.imread('my_panos/src/part3.jpg')
# Warp first image by the homography mapping
# the first image to the second image
p1_homography = ps.homography(p2, p1)
p1_warped, p1_origin = ps.warp_image(p1, p1_homography)
# Warp third image by the homography mapping
# the third image to the second image
p3_homography = ps.homography(p2, p3)
p3_warped, p3_origin = ps.warp_image(p3, p3_homography)
# Add alpha channel to second image
blue, green, red = cv2.split(p2)
alpha = np.zeros(green.shape, dtype=np.uint8)
alpha.fill(255)
p2 = cv2.merge([blue, green, red, alpha])
# Composite warped images and image in target plane
pano = ps.create_mosaic(
[p1_warped, p2, p3_warped], [p1_origin, (0, 0), p3_origin])
cv2.imwrite('my_panos/pano.jpg', pano)
| null |
CS378-Computer Vision/project_1/stitch.py
|
stitch.py
|
py
| 918 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pano_stitcher.homography",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pano_stitcher.warp_image",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pano_stitcher.homography",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pano_stitcher.warp_image",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.merge",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pano_stitcher.create_mosaic",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 31,
"usage_type": "call"
}
] |
103723213
|
from django.shortcuts import render, get_object_or_404, redirect
from blogtest.models import Post
from .models import Comment
from .form import CommentForm
# Create your views here.
def post_comment(request, post_pk):
    # First fetch the post being commented on, since the comment must be linked to it later.
    # get_object_or_404 is a Django shortcut: it returns the Post if it exists,
    # otherwise it returns a 404 page to the user.
post = get_object_or_404(Post, pk=post_pk)
if request.method == "POST":
form = CommentForm(request.POST)
        # form.is_valid() has Django check whether the submitted data satisfies the form's requirements
if form.is_valid():
            # The data is valid, so call the form's save method to store it.
            # commit=False builds a Comment model instance from the form data without saving it to the database yet.
comment = form.save(commit=False)
            # Link the comment to the post being commented on
comment.post = post
            # Finally persist the comment by calling the model instance's save method
comment.save()
            # Redirect to the post's detail page. When redirect() receives a model instance,
            # it calls the instance's get_absolute_url method and redirects to the URL it returns.
return redirect(post)
else:
            # The data is invalid, so re-render the detail page, including the form errors.
            # We therefore pass three template variables to detail.html:
            # the post, the comment list, and the form.
            # Note the use of post.comment_set.all(), which works much like
            # Post.objects.all(): it fetches every comment on this post.
            # Because Post and Comment are linked by a ForeignKey,
            # post.comment_set.all() performs a reverse query for all of the post's comments.
            comment_list = post.comment_set.all()  # equivalent to Comment.objects.filter(post=post)
context = {'post': post,
'form': form,
'comment_list': comment_list}
return render(request, 'blogtest/detail.html', context=context)
    # Not a POST request, meaning the user submitted no data; redirect to the post detail page
return redirect(post)
| null |
comments/views.py
|
views.py
|
py
| 2,546 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "blogtest.models.Post",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "form.CommentForm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "form.is_valid",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "form.save",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 50,
"usage_type": "call"
}
] |
539887010
|
# -*- coding: utf-8 -*-
"""
Flask app initialization.
"""
from flask import Flask
from flask.ext.mako import MakoTemplates
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_user import SQLAlchemyAdapter, UserManager
app = Flask(__name__) # pylint: disable=invalid-name
MakoTemplates(app)
db = SQLAlchemy(app)
def register_login_manager(app):
"""
Creates app.login_manager.
"""
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
def register_user_manager():
"""
Creates app.user_manager and database.
Call it after loading config which contains 'SECRET_KEY'.
"""
from models import User
from forms import LoginForm, RegisterForm
db_adapter = SQLAlchemyAdapter(db, User)
try:
app.user_manager
except AttributeError:
UserManager(
db_adapter,
app=app,
login_form=LoginForm,
register_form=RegisterForm
)
db.create_all()
return db_adapter
register_login_manager(app)
| null |
src/presence_analyzer/main.py
|
main.py
|
py
| 1,093 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.ext.mako.MakoTemplates",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask_user.SQLAlchemyAdapter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.User",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "flask_user.UserManager",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "forms.RegisterForm",
"line_number": 42,
"usage_type": "name"
}
] |
326407413
|
from datetime import datetime
from django.conf import settings
from django.utils.safestring import mark_safe
from drchrono.api import get_all_patients
# Since we cannot filter partial dates, we get all patients and then select
# those that have their birthday today.
def get_birthday_patients(social):
all_patients = get_all_patients(social)
# Select the patients if their date of birth is entered and today
if settings.DEBUG:
        # Use Feb 11 as the fixed test date (matches the [2, 11] check below)
birthday_patients = [patient for patient in all_patients if patient['date_of_birth'] and
[2, 11] == map(int, patient['date_of_birth'].split('-'))[1:3]]
else:
birthday_patients = [patient for patient in all_patients if patient['date_of_birth'] and
[datetime.today().month, datetime.today().day] ==
map(int, patient['date_of_birth'].split('-'))[1:3]]
return birthday_patients
# This function creates the body of the happy birthday email based on the patient information available
def make_email_message(patient, doctor):
message = ''
if patient['first_name']:
message = 'Dear ' + patient['first_name'] + ' ' + patient['last_name'] + ',%0D%0A %0D%0A'
elif patient['last_name']:
if patient['gender']:
if patient['gender'] == 'Male':
message = 'Dear Mr. ' + patient['last_name'] + '%0D%0A %0D%0A'
elif patient['gender'] == 'Female':
message = 'Dear Ms. ' + patient['last_name'] + '%0D%0A %0D%0A'
else:
message = 'Dear Mr. or Ms. ' + patient['last_name'] + '%0D%0A %0D%0A'
else:
message = 'Dear Mr. or Ms. ' + patient['last_name'] + '%0D%0A %0D%0A'
# If no first or last name is available, the first line is skipped
message += 'Happy birthday from your doctor! %0D%0A %0D%0A Best regards, %0D%0A '
if doctor.get_full_name():
message += 'Doctor ' + doctor.get_full_name()
else:
message += 'Your doctor'
return message
def make_birthday_message(patient, doctor):
# For correct ways of referring to patient, reads easier than using patient['first_name']
himher = 'them'
hisher = 'their'
if patient['gender'] == 'Male':
himher = 'him'
hisher = 'his'
elif patient['gender'] == 'Female':
himher = 'her'
hisher = 'her'
message = ''
# Add the photo of the patient if there is one
if patient['patient_photo']:
message += '<img src = \"' + patient['patient_photo'] + \
'\" alt = \"Patient Photo\" align = \"right\" width = \"100px\">'
# Notify doctor of birthday
message += '<div class="hero-unit"> <p>' + patient['first_name'] + ' ' + patient['last_name'] + \
' has ' + hisher + ' birthday today!'
# Add contact information if it is available
if patient['email']:
message += '<p> Contact ' + himher + ' by email at: ' + \
'<a href=\"mailto:' + patient['email'] + \
' ?subject=Happy%20Birthday!' + \
'&body=' + make_email_message(patient, doctor) + '\"> ' + \
patient['email'] + '</a>'
if patient['cell_phone']:
message += '<p> Give ' + himher + ' a call at: <a href="tel:' + \
patient['cell_phone'] + '">' + patient['cell_phone'] + '</a>'
elif patient['home_phone']:
message += '<p> Give ' + himher + ' a call at: <a href="tel:' + \
patient['home_phone'] + '">' + patient['home_phone'] + '</a>'
# Or a message if it is not
if not (patient['email'] or patient['cell_phone'] or patient['home_phone']):
message += '<p> Unfortunately, there is no contact information available for ' + himher
# uncomment the line below to show all of the patients information
# message += '<p>' + str(patient)
message += '</div>'
return mark_safe(message)
| null |
drchrono/birthday_wisher/modules.py
|
modules.py
|
py
| 3,984 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "drchrono.api.get_all_patients",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 103,
"usage_type": "call"
}
] |
387437624
|
#
# sublimelinter.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
import sublime
import sublime_plugin
import json
import os
import re
import time
from .lint.linter import Linter
from .lint.highlight import HighlightSet
from .lint import persist
# In ST3, this is the entry point for a plugin
def plugin_loaded():
persist.plugin_is_loaded = True
persist.load_settings()
class SublimeLinter(sublime_plugin.EventListener):
'''The main ST3 plugin class.'''
# We use this to match linter settings filenames.
    LINTER_SETTINGS_RE = re.compile(r'^SublimeLinter(-.+?)?\.sublime-settings')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Keeps track of which views we have assigned linters to
self.loaded_views = set()
# Keeps track of which views have actually been linted
self.linted_views = set()
# A mapping between view ids and syntax names
self.view_syntax = {}
persist.queue.start(self.lint)
# This gives us a chance to lint the active view on fresh install
window = sublime.active_window()
if window:
self.on_activated(window.active_view())
# We keep track of the start time to check for race conditions elsewhere
self.start_time = time.time()
# Every time a view is modified, this is updated and an asynchronous lint is queued.
# When a lint is done, if the view has been modified since the lint was initiated,
# marks are not updated because their positions may no longer be valid.
self.last_hit_time = 0
def lint(self, view_id, hit_time, callback=None):
callback = callback or self.highlight
view = Linter.get_view(view_id)
if view is None:
return
# Build a list of regions that match the linter's selectors
sections = {}
for sel, _ in Linter.get_selectors(view_id):
sections[sel] = []
for result in view.find_by_selector(sel):
sections[sel].append(
(view.rowcol(result.a)[0], result.a, result.b)
)
filename = view.file_name()
code = Linter.text(view)
Linter.lint_view(view_id, filename, code, sections, hit_time, callback)
def highlight(self, view, linters, hit_time):
'''Highlight any errors found during a lint.'''
errors = {}
highlights = HighlightSet()
for linter in linters:
if linter.highlight:
highlights.add(linter.highlight)
if linter.errors:
for line, errs in linter.errors.items():
errors.setdefault(line, []).extend(errs)
# If the view has been modified since the lint was triggered, don't draw marks
if self.last_hit_time > hit_time:
return
HighlightSet.clear(view)
highlights.draw(view)
persist.errors[view.id()] = errors
# Update the status
self.on_selection_modified_async(view)
def hit(self, view):
'''Record an activity that could trigger a lint and enqueue a desire to lint.'''
self.linted_views.add(view.id())
if view.size() == 0:
for l in Linter.get_linters(view.id()):
l.clear()
return
self.last_hit_time = persist.queue.hit(view)
def check_syntax(self, view, lint=False):
vid = view.id()
syntax = view.settings().get('syntax')
# Syntax either has never been set or just changed
        if vid not in self.view_syntax or self.view_syntax[vid] != syntax:
self.view_syntax[vid] = syntax
# Assign a linter, then maybe trigger a lint if we get one
if Linter.assign(view) and lint:
self.hit(view)
# sublime_plugin.EventListener event handlers
def on_modified(self, view):
'''Called when a view is modified.'''
self.check_syntax(view)
self.hit(view)
def on_modified_async(self, view):
'''Called *after* on_modified, updates the status.'''
self.on_selection_modified_async(view)
def on_load(self, view):
'''Called when a file is finished loading.'''
self.on_new(view)
def on_activated_async(self, view):
'''Called when a view gains input focus.'''
# Reload the plugin settings.
persist.load_settings()
if not view:
return
self.check_syntax(view, True)
view_id = view.id()
        if view_id not in self.linted_views:
            if view_id not in self.loaded_views:
                # It seems on_activated can be called before on_load on a fresh start
if time.time() - self.start_time < 5:
return
self.on_new(view)
self.hit(view)
self.on_selection_modified_async(view)
def on_open_settings(self, view):
'''
Called when any settings file is opened via the Preferences menu.
view is the view that contains the text of the settings file.
'''
filename = view.file_name()
if not filename:
return
dirname, filename = os.path.split(filename)
dirname = os.path.basename(dirname)
# We are only interested in the user SublimeLinter settings, not the default settings
if not self.LINTER_SETTINGS_RE.match(filename) or dirname != 'User':
return
persist.load_settings()
settings = persist.settings
# Fill in default linter settings
linters = settings.pop('linters', {})
for name, language in persist.languages.items():
default = language.get_settings().copy()
default.update(linters.pop(name, {}))
linters[name] = default
settings['linters'] = linters
def replace(edit):
if not view.is_dirty():
j = json.dumps({'user': settings}, indent=4, sort_keys=True)
j = j.replace(' \n', '\n')
view.replace(edit, sublime.Region(0, view.size()), j)
persist.edits[view.id()].append(replace)
view.run_command('sublimelinter_edit')
view.run_command('save')
def on_new(self, view):
'''Called when a new buffer is created.'''
self.on_open_settings(view)
vid = view.id()
self.loaded_views.add(vid)
self.view_syntax[vid] = view.settings().get('syntax')
Linter.assign(view)
def on_selection_modified_async(self, view):
'''Called when the selection changes (cursor moves or text selected).'''
vid = view.id()
# Get the line number of the first line of the first selection.
try:
lineno = view.rowcol(view.sel()[0].begin())[0]
except IndexError:
lineno = -1
if vid in persist.errors:
errors = persist.errors[vid]
if errors:
lines = sorted(list(errors))
counts = [len(errors[line]) for line in lines]
count = sum(counts)
plural = 's' if count > 1 else ''
if lineno in errors:
# Sort the errors by column
line_errors = sorted(errors[lineno], key=lambda error: error[0])
line_errors = [error[1] for error in line_errors]
if plural:
# Sum the errors before the first error on this line
index = lines.index(lineno)
first = sum(counts[0:index]) + 1
if len(line_errors) > 1:
status = '{}-{} of {} errors: '.format(first, first + len(line_errors) - 1, count)
else:
status = '{} of {} errors: '.format(first, count)
else:
status = 'Error: '
status += '; '.join(line_errors)
else:
status = '%i error%s' % (count, plural)
view.set_status('sublimelinter', status)
else:
view.erase_status('sublimelinter')
persist.queue.delay(1.0) # Delay the queue so movement is smooth
class sublimelinter_edit(sublime_plugin.TextCommand):
'''A plugin command used to generate an edit object for a view.'''
def run(self, edit):
persist.edit(self.view.id(), edit)
| null |
sublimelinter.py
|
sublimelinter.py
|
py
| 8,683 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "lint.persist.plugin_is_loaded",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "lint.persist.load_settings",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "lint.persist",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "sublime_plugin.EventListener",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lint.persist.queue.start",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "lint.persist.queue",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "sublime.active_window",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter.get_view",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "lint.linter.Linter.get_selectors",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "lint.linter.Linter.text",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "lint.linter.Linter.lint_view",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "lint.highlight.HighlightSet",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "lint.highlight.HighlightSet.clear",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "lint.highlight.HighlightSet",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "lint.persist.errors",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "lint.linter.Linter.get_linters",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "lint.persist.queue.hit",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "lint.persist.queue",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "lint.linter.Linter.assign",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "lint.linter",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "lint.persist.load_settings",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "lint.persist",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "lint.persist.load_settings",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "lint.persist",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "lint.persist.settings",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "lint.persist.languages.items",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "lint.persist.languages",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "sublime.Region",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "lint.persist.edits",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "lint.linter.Linter.assign",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "lint.linter.Linter",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "lint.persist.errors",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "lint.persist.errors",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "lint.persist.queue.delay",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "lint.persist.queue",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "lint.persist",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "sublime_plugin.TextCommand",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "lint.persist.edit",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "lint.persist",
"line_number": 271,
"usage_type": "name"
}
] |
87508914
|
#!/usr/bin/env python3
### Requirements ###
# bqpjson v0.5 - pip install bqpjson
# ortools v1.5 - https://developers.google.com/optimization/
import sys, json, argparse
from ortools.linear_solver import pywraplp
import bqpjson
def main(args):
if args.input_file == None:
data = json.load(sys.stdin)
else:
with open(args.input_file) as file:
data = json.load(file)
bqpjson.validate(data)
if data['variable_domain'] != 'boolean':
print('only boolean domains are supported. Given %s' % data['variable_domain'])
quit()
solver = pywraplp.Solver('BOP', pywraplp.Solver.BOP_INTEGER_PROGRAMMING)
solver.EnableOutput()
#solver.SetSolverSpecificParametersAsString('prune_search_tree:true')
#solver.SetSolverSpecificParametersAsString('use_random_lns:false')
#solver.SetSolverSpecificParametersAsString('num_relaxed_vars:40')
#solver.SetSolverSpecificParametersAsString('use_potential_one_flip_repairs_in_ls:true')
#solver.SetSolverSpecificParametersAsString('use_lp_strong_branching:true')
#solver.SetSolverSpecificParametersAsString('lp_max_deterministic_time:10.0')
if args.runtime_limit != None:
solver.SetTimeLimit(args.runtime_limit*1000)
variable_ids = set(data['variable_ids'])
variable_product_ids = set([(qt['id_tail'], qt['id_head']) for qt in data['quadratic_terms']])
variable_lookup = {}
for vid in variable_ids:
variable_lookup[(vid,vid)] = solver.BoolVar(name='site_{:04d}'.format(vid))
for pair in variable_product_ids:
variable_lookup[pair] = solver.BoolVar(name='product_{:04d}_{:04d}'.format(*pair))
    # Model the conjunction (AND) of two binary variables
for i,j in variable_product_ids:
solver.Add(variable_lookup[(i,j)] >= variable_lookup[(i,i)] + variable_lookup[(j,j)] - 1)
solver.Add(variable_lookup[(i,j)] <= variable_lookup[(i,i)])
solver.Add(variable_lookup[(i,j)] <= variable_lookup[(j,j)])
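        # Together these force variable_lookup[(i, j)] == x_i AND x_j:
        # the first constraint pushes it to 1 when both variables are 1,
        # and the other two pin it to 0 whenever either variable is 0.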
# TODO is there a way to give "/\" to the solver directly?
linear_terms = [int(lt['coeff'])*variable_lookup[(lt['id'], lt['id'])] for lt in data['linear_terms']]
quadratic_terms = [int(qt['coeff'])*variable_lookup[(qt['id_tail'], qt['id_head'])] for qt in data['quadratic_terms']]
obj_expr = solver.Sum(linear_terms + quadratic_terms)
    solver.Minimize(obj_expr)  # Minimize() installs the objective; it returns None
solver.Solve()
if args.show_solution:
print('')
for k,v in variable_lookup.items():
print('{} - {}'.format(k, v.SolutionValue()))
print('')
print('obj_ub = {}'.format(solver.Objective().Value()))
print('obj_lb = {}'.format(solver.Objective().BestBound()))
print('walltime: {}ms'.format(solver.WallTime()))
nodes = len(data['variable_ids'])
edges = len(data['quadratic_terms'])
obj_ub = solver.Objective().Value()
obj_lb = solver.Objective().BestBound()
node_count = 0 # iterations and nodes are not available
cut_count = 0
runtime = solver.WallTime()/1000.0
scaled_objective = data['scale']*(obj_ub+data['offset'])
scaled_lower_bound = data['scale']*(obj_lb+data['offset'])
print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' % (nodes, edges, scaled_objective, scaled_lower_bound, obj_ub, obj_lb, runtime, cut_count, node_count))
def build_cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--input-file', help='the data file to operate on (.json)')
parser.add_argument('-rtl', '--runtime-limit', help='runtime limit (sec.)', type=int)
parser.add_argument('-tl', '--thread-limit', help='thread limit', type=int, default=10)
    parser.add_argument('-ss', '--show-solution', help='prints the best solution found at termination', action='store_true', default=False)
return parser
if __name__ == '__main__':
parser = build_cli_parser()
main(parser.parse_args())
| null |
bop_ortools.py
|
bop_ortools.py
|
py
| 3,896 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bqpjson.validate",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ortools.linear_solver.pywraplp.Solver",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ortools.linear_solver.pywraplp",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 88,
"usage_type": "call"
}
] |
41665574
|
import sys
sys.stdin = open('input.txt', 'r')
import sys
input = sys.stdin.readline
from itertools import combinations
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
ans = float('inf')
for case in combinations(range(N), N//2):
esac = tuple(set(range(N)).difference(case))
temp, pmet = 0, 0
for p in case:
for q in case[::-1]:
temp += arr[p][q]
for r in esac:
for s in esac[::-1]:
pmet += arr[r][s]
if ans > abs(temp-pmet):
ans = abs(temp-pmet)
print(ans)
| null |
BOJ/201904/14889.py
|
14889.py
|
py
| 554 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.stdin",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "itertools.combinations",
"line_number": 12,
"usage_type": "call"
}
] |
262733548
|
import socket
import struct
import numpy as np
import matplotlib.pyplot as plt
import time
from matplotlib.animation import FuncAnimation
import threading
import sys
from scipy.signal import savgol_filter
src_addr = '129.82.45.102'
# src_addr = '127.0.0.1'
src_port = 8000
stream_id = 32
def connect():
"""
Connect to a specific port
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((src_addr, src_port))
except:
print("Error connecting to {}:{}".format(src_addr, src_port))
return None
try:
print("Sending stream info")
sock.sendall(struct.pack('<i', stream_id))
except:
print("Error: Stream rejected")
return None
print("Successfully connected to host")
return sock
def decode_frame(raw_frame):
# The format is given according to the following assumption of network data
# Expect little endian byte order
endianness = "<"
# [ commonTimestamp | frame type | Tracked body count | Engaged
header_format = "qiBB"
timestamp, frame_type, tracked_body_count, engaged = struct.unpack(endianness + header_format,
raw_frame[:struct.calcsize(header_format)])
# For each body, a header is transmitted
# TrackingId | HandLeftConfidence | HandLeftState | HandRightConfidence | HandRightState ]
body_format = "Q4B"
# For each of the 25 joints, the following info is transmitted
# [ JointType | TrackingState | Position.X | Position.Y | Position.Z | Orientation.W | Orientation.X | Orientation.Y | Orientation.Z ]
joint_format = "BB7f"
frame_format = body_format + (joint_format * 25)
# Unpack the raw frame into individual pieces of data as a tuple
frame_pieces = struct.unpack(endianness + (frame_format * (1 if engaged else 0)),
raw_frame[struct.calcsize(header_format):])
decoded = (timestamp, frame_type, tracked_body_count, engaged) + frame_pieces
return decoded
def recv_all(sock, size):
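    # Read exactly `size` bytes from the socket, looping until the whole
    # message has arrived.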
result = b''
while len(result) < size:
data = sock.recv(size - len(result))
if not data:
            raise EOFError("Error: Received only {} bytes of a {} byte message".format(len(result), size))
result += data
return result
def recv_skeleton_frame(sock):
"""
To read each stream frame from the server
"""
(load_size,) = struct.unpack("<i", recv_all(sock, struct.calcsize("<i")))
# print load_size
return recv_all(sock, load_size)
# following codes get the elbow and wrist information from the kinect sensor
class Pointing:
def __init__(self, pointing_mode='screen'):
if pointing_mode == 'screen':
self.screen_mode = True
elif pointing_mode == 'desk':
self.screen_mode = False
else:
raise ValueError('Pointing mode is not recognized!\n Accepted: screen, desk\n Received: %s' % pointing_mode)
if not self.screen_mode:
# use this if in desk mode
self.WRISTLEFT = 6 # JointType specified by kinect
self.WRISTRIGHT = 10
self.ELBOWLEFT = 5
self.ELBOWRIGHT = 9
self.joint_interest_coded = [self.WRISTLEFT, self.WRISTRIGHT, self.ELBOWLEFT, self.ELBOWRIGHT]
else:
# use this if in screen mode
self.HANDTIPLEFT = 21 # JointType specified by kinect
self.HANDTIPRIGHT = 23
self.SHOULDERLEFT = 4
self.SHOULDERRIGHT = 8
self.joint_interest_coded = [self.HANDTIPLEFT, self.HANDTIPRIGHT, self.SHOULDERLEFT, self.SHOULDERRIGHT]
self.joint_info = {i: None for i in self.joint_interest_coded} # contains left/right wrists/elbows coordinates
self.joint_info_buffer = {i: [] for i in self.joint_interest_coded}
self.lpoint_buffer = []
self.rpoint_buffer = []
self.lpoint_tmp = (0.0, -0.6)
self.rpoint_tmp = (0.0, -0.6)
self.lpoint = (0.0, -0.6) # inferred pointing coordinate on the table from left arm
self.rpoint = (0.0, -0.6) # inferred pointing coordinate on the table from right arm
self.lpoint_var = (0, 0) # variance of left point, sent to Brandeis
self.rpoint_var = (0, 0) # variance of right point, sent to Brandeis
self.lpoint_stable = False # whether left hand pointing is stable
self.rpoint_stable = False # whether right hand pointing is stable
def get_pointing_main(self, src, is_smoothing_joint=True, is_smoothing_point=True):
if not self._get_wrist_elbow(src):
return
if self.screen_mode:
try:
if is_smoothing_joint:
self._smoothing_joint(5, 2)
self._smoothing_joint_mean(5)
self._get_pointing(True) # True is coordinates on screen
if is_smoothing_point:
pass
self._smoothing_point_mean(5)
self._smoothing_point(5, 2)
self.lpoint = (self.lpoint_tmp[0] - 0.25, self.lpoint_tmp[1])
self.rpoint = (self.rpoint_tmp[0] + 0.25, self.rpoint_tmp[1])
except Exception as e:
print(e)
else:
try:
self._smoothing_joint_desk(3, 2)
self._get_pointing(False)
self._smoothing_point(3, 2)
self.lpoint, self.rpoint = self.lpoint_tmp, self.rpoint_tmp
except Exception as e:
print(e)
self.lpoint_var = np.std(self.lpoint_buffer, axis=0)
self.rpoint_var = np.std(self.rpoint_buffer, axis=0)
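        # A pointing coordinate counts as "stable" when its spread across the
        # smoothing buffer stays within 0.005 units (presumably metres) on both axes.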
if np.any((np.amax(self.lpoint_buffer, axis=0) - np.amin(self.lpoint_buffer, axis=0)) > [0.005, 0.005]):
self.lpoint_stable = False
else:
self.lpoint_stable = True
if np.any((np.amax(self.rpoint_buffer, axis=0) - np.amin(self.rpoint_buffer, axis=0)) > [0.005, 0.005]):
self.rpoint_stable = False
else:
self.rpoint_stable = True
def _get_wrist_elbow(self, src):
'''
This function retrieves the coordinates for left/right wrists/elbows (4 sets of 3 values: x, y, z)
@:param src: decoded frame retrieved from the decode_frame() function
'''
try:
for i in range(25):
if src[(i + 1) * 9] in self.joint_interest_coded:
self.joint_info[src[(i + 1) * 9]] = src[(i + 1) * 9 + 2: (i + 2) * 9 + 5]
return True
except IndexError:
print('Not enough coordinates to unpack')
return False
def _smoothing_joint(self, window_length=5, polyorder=2):
for k, v in self.joint_info_buffer.items():
if len(v) >= window_length:
self.joint_info_buffer[k].pop(0)
self.joint_info_buffer[k].append(self.joint_info[k])
joint_smoothed = savgol_filter(self.joint_info_buffer[k], window_length, polyorder, axis=0).tolist()
self.joint_info[k] = joint_smoothed[window_length // 2]
else:
self.joint_info_buffer[k].append(self.joint_info[k])
def _smoothing_joint_mean(self, window_length=5):
for k, v in self.joint_info_buffer.items():
if len(v) >= window_length:
self.joint_info_buffer[k].pop(0)
self.joint_info_buffer[k].append(self.joint_info[k])
self.joint_info[k] = np.mean(self.joint_info_buffer[k], axis=0)
else:
self.joint_info_buffer[k].append(self.joint_info[k])
def _smoothing_point(self, window_length=5, polyorder=2):
'''
Smoothing function for left and right pointing coordinates
:param window_length:
:param polyorder:
:return:
'''
if len(self.lpoint_buffer) >= window_length:
self.lpoint_buffer.pop(0)
self.lpoint_buffer.append(self.lpoint_tmp)
self.lpoint_buffer = savgol_filter(self.lpoint_buffer, window_length, polyorder, axis=0).tolist()
self.lpoint_tmp = self.lpoint_buffer[int(window_length / 2)]
else:
self.lpoint_buffer.append(self.lpoint_tmp)
if len(self.rpoint_buffer) >= window_length:
self.rpoint_buffer.pop(0)
self.rpoint_buffer.append(self.rpoint_tmp)
self.rpoint_buffer = savgol_filter(self.rpoint_buffer, window_length, polyorder, axis=0).tolist()
self.rpoint_tmp = self.rpoint_buffer[int(window_length / 2)]
else:
self.rpoint_buffer.append(self.rpoint_tmp)
def _smoothing_point_mean(self, window_length=5):
if len(self.lpoint_buffer) >= window_length:
self.lpoint_buffer.pop(0)
self.lpoint_buffer.append(self.lpoint_tmp)
self.lpoint_tmp = np.mean(self.lpoint_buffer, axis=0)
else:
self.lpoint_buffer.append(self.lpoint_tmp)
if len(self.rpoint_buffer) >= window_length:
self.rpoint_buffer.pop(0)
self.rpoint_buffer.append(self.rpoint_tmp)
self.rpoint_tmp = np.mean(self.rpoint_buffer, axis=0)
else:
self.rpoint_buffer.append(self.rpoint_tmp)
def _get_pointing(self, screen=True):
if not screen:
l_coord1 = self.joint_info[self.WRISTLEFT]
r_coord1 = self.joint_info[self.WRISTRIGHT]
l_coord2 = self.joint_info[self.ELBOWLEFT]
r_coord2 = self.joint_info[self.ELBOWRIGHT]
else:
l_coord1 = self.joint_info[self.HANDTIPLEFT]
r_coord1 = self.joint_info[self.HANDTIPRIGHT]
l_coord2 = self.joint_info[self.SHOULDERLEFT]
r_coord2 = self.joint_info[self.SHOULDERRIGHT]
self.lpoint_tmp = self._calc_coordinates(l_coord1, l_coord2, screen)
self.rpoint_tmp = self._calc_coordinates(r_coord1, r_coord2, screen)
def _calc_coordinates(self, wrist, elbow, screen=True):
if screen:
'''
Both wrist and elbow should contain (x,y,z) coordinates
screen plane: z=0, ie pz = 0
Line equation:
(ex - px)/(ex - wx) = (ez - pz)/(ez - wz) = ez/(ez - wz)
(ey - py)/(ey - wy) = (ez - pz)/(ez - wz) = ez/(ez - wz)
so:
px = ex - ez(ex-wx) / (ez-wz)
py = ey - ez(ey-wy) / (ez-wz)
'''
if (elbow[2] - wrist[2]) == 0:
return -np.inf, -np.inf
screen_x = elbow[0] - elbow[2] * (elbow[0] - wrist[0]) / (elbow[2] - wrist[2])
screen_y = elbow[1] - elbow[2] * (elbow[1] - wrist[1]) / (elbow[2] - wrist[2])
return screen_x, screen_y
else:
'''
Both wrist and elbow should contain (x,y,z) coordinates
Table plane: y = -0.582
Line equation:
y = (y2-y1)/(x2-x1) * (x-x1) + y1
z = (z2-z1)/(y2-y1) * (y-y1) + z1
so:
x = x1 - (y1-y) / (y2-y1) * (x2-x1)
z = z1 - (y1-y) / (y2-y1) * (z2-z1)
'''
if (elbow[1] - wrist[1]) == 0:
return -np.inf, -np.inf
table_y = -0.582
table_x = wrist[0] - (wrist[1] - table_y) / (elbow[1] - wrist[1]) * (elbow[0] - wrist[0])
table_z = wrist[2] - (wrist[1] - table_y) / (elbow[1] - wrist[1]) * (elbow[2] - wrist[2])
return table_x, table_z
def _smoothing_joint_desk(self, window_length=3, polyorder=2):
for k, v in self.joint_info_buffer.items():
if len(v) >= window_length:
self.joint_info_buffer[k].pop(0)
self.joint_info_buffer[k].append(self.joint_info[k])
self.joint_info_buffer[k] = \
savgol_filter(self.joint_info_buffer[k], window_length, polyorder, axis=0).tolist()
self.joint_info[k] = np.mean(self.joint_info_buffer[k], axis=0)
else:
self.joint_info_buffer[k].append(self.joint_info[k])
def test_run(self):
"""
Start a thread for testing purpose only
"""
threading.Thread(target=self.update_point).start()
def update_point(self):
"""
        Connect to the Kinect server and update the pointing coordinates.
        Used for testing only.
"""
# record_writer = open('rh_record.txt', 'w')
# var_writer = open('rh_var.txt', 'w')
# self.num_frames = 0
# self.next_session = False
s = connect()
if s is None:
sys.exit(0)
while True:
try:
f = recv_skeleton_frame(s)
self.get_pointing_main(decode_frame(f))
print(p.lpoint, p.rpoint)
# if self.next_session:
# p.num_frames = 0
# record_writer.flush()
# record_writer.write('*'*50 + '\n')
# var_writer.flush()
# var_writer.write('*'*50 + '\n')
# self.next_session = False
# record_writer.write('%s\t%s\n' % (p.lpoint, p.rpoint))
# var_writer.write('%s\t%s\n' % (p.lpoint_var, p.rpoint_var))
# self.num_frames += 1
except Exception as e:
print(e)
s.close()
break
if __name__ == '__main__':
p = Pointing('screen')
p.test_run()
# Plot where the pointing position is on the table
# The table has a width of (-1,1) and depth of (1.0, 1.6)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
def animate(i):
ax.clear()
if p.screen_mode:
ax.set_xlim(-1, 1) # screen_x
ax.set_ylim(0.3, -1) # screen_y
else:
ax.set_xlim(-1, 1) # width of the table, ie table_x
ax.set_ylim(0, 1.6) # length of the table, ie table_z
plt.gca().invert_yaxis()
# llabel = '(%.2f, %.2f)' % (p.lpoint[0], p.lpoint[1])
# rlabel = '(%.2f, %.2f)' % (p.rpoint[0], p.rpoint[1])
# ax.plot(p.lpoint[0], p.lpoint[1], 'bo', label=llabel)
# ax.plot(p.rpoint[0], p.rpoint[1], 'ro', label=rlabel)
# plt.legend(prop={'size': 35})
# if p.num_frames >= 300:
# p.next_session = True
# plt.title('%s' % p.num_frames)
if p.lpoint_stable:
llabel = 'STABLE'
else:
llabel = 'MOVING'
if p.rpoint_stable:
rlabel = 'STABLE'
else:
rlabel = 'MOVING'
circle_l = plt.Circle(p.lpoint, np.mean(p.lpoint_var) * 2, color='b', label=llabel)
circle_r = plt.Circle(p.rpoint, np.mean(p.rpoint_var) * 2, color='r', label=rlabel)
ax.add_artist(circle_l)
ax.add_artist(circle_r)
plt.title('%s %s' % (llabel, rlabel))
ani = FuncAnimation(fig, animate)
plt.show()
| null |
v1/receiveAndShow_screen.py
|
receiveAndShow_screen.py
|
py
| 15,083 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "socket.socket",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "struct.pack",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "struct.calcsize",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "struct.calcsize",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "struct.calcsize",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "scipy.signal.savgol_filter",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "scipy.signal.savgol_filter",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "scipy.signal.savgol_filter",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal.savgol_filter",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "matplotlib.animation.FuncAnimation",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 385,
"usage_type": "name"
}
] |
400180185
|
#!/usr/bin/env python3
"""Write gaze samples to samples.txt"""
import argparse
import itertools
import os
import queue
import threading
import time
import sys
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow as tf
from datasources import Video, Webcam, HDF5Source
from models import ELG
import util.gaze
if __name__ == '__main__':
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
screen_size = np.int32([1920, 1080])
screen_img = np.zeros((*screen_size[::-1], 3), dtype=np.uint8)
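    # 3x3 grid of calibration targets: the screen centre plus offsets of
    # +/-40% of the screen size along each axis, cycled through endlessly.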
calibration_points = itertools.cycle(0.5*screen_size + 0.4*screen_size*(np.mgrid[:3,:3].T.reshape(-1,2)-1))
calibration_point = next(calibration_points)
# Check if GPU is available
from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
try:
gpus = [d for d in device_lib.list_local_devices(config=session_config)
if d.device_type == 'GPU']
gpu_available = len(gpus) > 0
except:
pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
data_source = HDF5Source(tensorflow_session=session, batch_size=batch_size,
hdf_path = '/home/matt/eyetracking/MPIIGaze.h5',
keys_to_use = ['train/p08'],
data_format='NHWC',
testing = True,
eye_image_shape = (180, 108),
)
print(data_source._num_entries)
#print(next(data_source.entry_generator()))
# Define model
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
key = 0
global calibration_point
cv.namedWindow('view')
#cv.setWindowProperty('calibration', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
print('seeing ' + str(inferred_stuff_queue.qsize()))
print(' and ' + str(inferred_stuff_queue.empty()))
if inferred_stuff_queue.empty():
time.sleep(0.5)
continue
print('not empty!')
output = inferred_stuff_queue.get()
#print('got : ', output)
print(output.keys())
for j in range(batch_size):
eye = output['eye'][j]
eye = cv.cvtColor(0.5*eye+0.5, cv.COLOR_GRAY2BGR)
landmarks = output['landmarks'][j,:]
radius = output['radius'][j]
theta, phi = output['gaze'][j]
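                    # Convert the pitch/yaw gaze angles into a 2-D direction
                    # vector for drawing (image coordinates: x right, y down).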
dx = -np.cos(theta) * np.sin(phi)
dy = -np.sin(theta)
d2x = theta
d2y = phi
d = np.float32([dx, dy])
d2= np.float32([d2x, d2y])
#for landmark in landmarks:
# cv.drawMarker(eye, tuple(np.round(landmark).astype(np.int32)), (0,255,255))
cv.polylines(
eye,
[np.round(landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.polylines(
eye,
[np.round(landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(eye, tuple(np.round(landmarks[16]).astype(np.int32)), (0,255,255))
cv.circle(eye, tuple(np.round(landmarks[17]).astype(np.int32)), radius, (0,255,0), lineType=cv.LINE_AA)
cv.arrowedLine(eye,
tuple(np.round(landmarks[16]).astype(np.int32)),
tuple(np.round(landmarks[16] + radius * d).astype(np.int32)),
(0,0,255), thickness=1, line_type=cv.LINE_AA);
#cv.arrowedLine(eye,
# tuple(np.round(landmarks[16]).astype(np.int32)),
# tuple(np.round(landmarks[16] + radius * d2).astype(np.int32)),
# (255,0,0), thickness=1, line_type=cv.LINE_AA);
#eye = output['old_eye'][j]
#print(eye)
cv.imshow('view', eye)
cv.waitKey()
visualize_thread = threading.Thread(target=_visualize_output, name='visualization')
visualize_thread.daemon = True
visualize_thread.start()
# Do inference forever
infer = model.inference_generator()
while True:
output = next(infer)
#print(output)
#print(output.keys())
#print(output['heatmaps'].shape)
#print(output['landmarks'].shape)
inferred_stuff_queue.put_nowait(output)
#print(inferred_stuff_queue.qsize())
#for frame_index in np.unique(output['frame_index']):
# if frame_index not in data_source._frames:
# continue
# frame = data_source._frames[frame_index]
# if 'inference' in frame['time']:
# frame['time']['inference'] += output['inference_time']
# else:
# frame['time']['inference'] = output['inference_time']
#inferred_stuff_queue.put_nowait(output)
#sample_file.close()
| null |
src/view_data.py
|
view_data.py
|
py
| 7,207 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "coloredlogs.install",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "itertools.cycle",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.mgrid",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.ConfigProto",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tensorflow.GPUOptions",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.client.device_lib.list_local_devices",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.client.device_lib",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "tensorflow.logging.set_verbosity",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Session",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "datasources.HDF5Source",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "models.ELG",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_GRAY2BGR",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cv2.polylines",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "cv2.polylines",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawMarker",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "cv2.arrowedLine",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 169,
"usage_type": "call"
}
] |
18628778
|
import re
import signal
from datetime import datetime, timezone, timedelta
import time
import os
import errno
import psutil
from threading import Thread
import youtube_dl
from common import logger
def time_now():
utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
# now = bj_dt.strftime('%Y{0}%m{1}%d{2}').format(*'...')
now = bj_dt.strftime('%Y{0}%m{1}%d').format(*'..')
return now
def match1(text, *patterns):
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
def wait_child(signum, frame):
logger.debug('receive SIGCHLD')
try:
while True:
            # -1 means wait for any child process
            # os.WNOHANG means do not block: return immediately if no child
            # currently has an exit status to collect
cpid, status = os.waitpid(-1, os.WNOHANG)
if cpid == 0:
logger.debug('no child process was immediately available')
break
exitcode = status >> 8
logger.debug('child process %s exit with exitcode %s', cpid, exitcode)
except OSError as e:
if e.errno == errno.ECHILD:
logger.error('current process has no existing unwaited-for child processes.')
else:
raise
logger.debug('handle SIGCHLD end')
def signal_handler(signum, frame):
    logger.info('Received terminate signal')
raise youtube_dl.utils.DownloadError(signum)
def kill_child_processes(parent_pid, file_name_, sig=signal.SIGINT):
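    # Watchdog: poll the .part file every 15 s. If it stops growing, terminate
    # the stalled download process(es); if it grows past 2.5 GB, terminate the
    # download so recording continues in a new segment.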
file_name_ = file_name_ + '.part'
last_file_size = 0.0
    logger.info('Received {0}, {1}'.format(parent_pid, file_name_))
while True:
time.sleep(15)
if os.path.isfile(file_name_):
file_size = os.path.getsize(file_name_) / 1024 / 1024 / 1024
file_sizes = os.path.getsize(file_name_)
if float(file_sizes) == last_file_size:
                try:
                    parent = psutil.Process(parent_pid)
                except psutil.NoSuchProcess:
                    return
                children = parent.children(recursive=True)
                if len(children) == 0:
                    # parent.send_signal(sig)
                    parent.terminate()
                    logger.info('Download stalled (pandaTV) ' + file_name_)
                else:
                    for process in children:
                        # print(process)
                        # process.send_signal(sig)
                        process.terminate()
                    logger.info('Download stalled ' + file_name_)
                # time.sleep(1)
                if os.path.isfile(file_name_):
                    logger.info('Stalled download process may not have exited cleanly')
                    continue
                else:
                    logger.info('Stalled download process exited successfully')
                    break
last_file_size = file_sizes
            if float(file_size) >= 2.5:
                try:
                    parent = psutil.Process(parent_pid)
                except psutil.NoSuchProcess:
                    return
                children = parent.children(recursive=True)
                if len(children) == 0:
                    # parent.send_signal(sig)
                    parent.terminate()
                    logger.info('Splitting download (pandatv) ' + file_name_)
                else:
                    for process in children:
                        # print(process)
                        # process.send_signal(sig)
                        process.terminate()
                    print('Splitting download')
                    logger.info('Splitting download ' + file_name_)
                break
else:
            logger.info('Monitor thread for <%s> exiting' % file_name_)
return
# os._exit(0)
    logger.info('Exiting monitor thread for <%s>' % file_name_)
def monitoring(q):
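    # Consume (pid, file_name) pairs from the queue and spawn a watchdog
    # thread (kill_child_processes) for each active download.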
# signal.signal(signal.SIGCHLD, wait_child)
while True:
        # print('start monitoring')
pid, file_name = q.get()
time.sleep(5)
        logger.info('Received {0}, {1}'.format(pid, file_name))
t = Thread(target=kill_child_processes, args=(pid, file_name))
t.start()
def new_hook(t, v, tb):
logger.error("Uncaught exception:", exc_info=(t, v, tb))
# class SafeRotatingFileHandler(TimedRotatingFileHandler):
# def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
# TimedRotatingFileHandler.__init__(self, filename, when, interval, backupCount, encoding, delay, utc)
#
# """
# Override doRollover
# lines commanded by "##" is changed by cc
# """
#
# def doRollover(self):
# """
# do a rollover; in this case, a date/time stamp is appended to the filename
# when the rollover happens. However, you want the file to be named for the
# start of the interval, not the current time. If there is a backup count,
# then we have to get a list of matching filenames, sort them and remove
# the one with the oldest suffix.
#
# Override, 1. if dfn not exist then do rename
# 2. _open with "a" model
# """
# if self.stream:
# self.stream.close()
# self.stream = None
# # get the time that this sequence started at and make it a TimeTuple
# currentTime = int(time.time())
# dstNow = time.localtime(currentTime)[-1]
# t = self.rolloverAt - self.interval
# if self.utc:
# timeTuple = time.gmtime(t)
# else:
# timeTuple = time.localtime(t)
# dstThen = timeTuple[-1]
# if dstNow != dstThen:
# if dstNow:
# addend = 3600
# else:
# addend = -3600
# timeTuple = time.localtime(t + addend)
# dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
# ## if os.path.exists(dfn):
# ## os.remove(dfn)
#
# # Issue 18940: A file may not have been created if delay is True.
# ## if os.path.exists(self.baseFilename):
# if not os.path.exists(dfn) and os.path.exists(self.baseFilename):
# os.rename(self.baseFilename, dfn)
# if self.backupCount > 0:
# for s in self.getFilesToDelete():
# os.remove(s)
# if not self.delay:
# self.mode = "a"
# self.stream = self._open()
# newRolloverAt = self.computeRollover(currentTime)
# while newRolloverAt <= currentTime:
# newRolloverAt = newRolloverAt + self.interval
# # If DST changes and midnight or weekly rollover, adjust for this.
# if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
# dstAtRollover = time.localtime(newRolloverAt)[-1]
# if dstNow != dstAtRollover:
# if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
# addend = -3600
# else: # DST bows out before next rollover, so we need to add an hour
# addend = 3600
# newRolloverAt += addend
# self.rolloverAt = newRolloverAt
| null |
Engine/work.py
|
work.py
|
py
| 7,617 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.utcnow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.timezone",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "common.logger.debug",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.waitpid",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.WNOHANG",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "common.logger.debug",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "common.logger.debug",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "errno.ECHILD",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "common.logger.error",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "common.logger.debug",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "common.logger.info",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "youtube_dl.utils.DownloadError",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "youtube_dl.utils",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "signal.SIGINT",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "common.logger.info",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "psutil.Process",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "psutil.NoSuchProcess",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "common.logger.info",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "common.logger.info",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "common.logger.info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "common.logger.info",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "psutil.Process",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "psutil.NoSuchProcess",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "common.logger.info",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "common.logger.info",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "common.logger.info",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "common.logger.info",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "common.logger.info",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "common.logger.error",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "common.logger",
"line_number": 135,
"usage_type": "name"
}
] |
109975958
|
import matplotlib.pyplot as plt
import numpy as np
# Make matplotlib render Chinese characters
plt.rcParams['font.sans-serif']='SimHei'
# Render the minus sign correctly
plt.rcParams['axes.unicode_minus']=False
# Force numpy to print full arrays without truncation
# (note: newer NumPy versions reject np.NaN here; np.inf or sys.maxsize is the usual choice)
np.set_printoptions(threshold=np.NaN)
# Load the data (binary .npz archive)
data=np.load('国民经济核算季度数据.npz')
print(type(data))
for i in data:
print(i)
name=data['columns']
values=data['values']
# print(name)
# print(values)
# Data
x=range(9)
y=values[0,6:16]
print(x)
print(y)
# Plot (the title says "histogram", but plt.pie below actually draws a pie chart)
plt.figure()
plt.title('2000年第一季度各产业生产总值直方图')
plt.ylabel('生产总值(亿元)')
labels=['农业','工业','建筑业','批发','交通运输','餐饮','金融','房地产','其他行业']
plt.xticks(range(9),labels)
plt.pie(y,labels=labels,autopct='%1.1f%%')
# plt.savefig('报告3.png')
plt.show()
| null |
数据分析/day3/14、作业2.py
|
14、作业2.py
|
py
| 885 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
}
] |
100602281
|
# -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2019/6/18 7:33
# @Software : PyCharm
# @Python_verison : 3.7
'''
The multiprocessing module is used much like the threading module. It offers
local and remote concurrency, side-stepping the Global Interpreter Lock (GIL)
by using processes instead of threads. Because of the GIL, multithreading
cannot effectively exploit multiple CPU cores in CPU-bound programs -- an
interpreter executes only one thread at any given moment -- so the
multiprocessing module lets a program make full use of the machine's
processors. It runs on both Unix and Windows.
'''
from time import sleep,ctime
import multiprocessing
def super_play(file,time):
for i in range(2):
print('playing start %s,%s'%(file,ctime()))
sleep(time)
dict = {'霸王别姬.mp4':3,'一万个可能.mp3':2}  # note: the name 'dict' shadows the builtin
lists = []
for file,time in dict.items():
t = multiprocessing.Process(target=super_play,args=(file,time))
lists.append(t)
if __name__ == '__main__':
for i in lists:
i.start()
for i in lists:
        i.join() # wait for the process to terminate
print('all end %s'%ctime())
'''
playing start 霸王别姬.mp4,Wed Jun 19 07:39:08 2019
playing start 一万个可能.mp3,Wed Jun 19 07:39:08 2019
playing start 一万个可能.mp3,Wed Jun 19 07:39:10 2019
playing start 霸王别姬.mp4,Wed Jun 19 07:39:11 2019
all end Wed Jun 19 07:39:14 2019
'''
# As the output above shows, multiprocessing and multithreading give the same
# result here, so this example alone does not reveal their differences.
# We create processes with multiprocessing.Process;
# processes likewise have start and join methods.
# Process.__init__ takes (self, group=None, target=None, name=None, args=(), kwargs={})
# target  the callable object to invoke
# group   essentially unused; leave as None
# name    an alias for the process
# args    positional arguments for the target
# kwargs  keyword arguments for the target
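# --- optional sketch: a Pool spreads CPU-bound work across all cores ---
# This complements the GIL note above: a worker pool is the usual way to
# actually exploit multiple processors. The function cpu_bound and its inputs
# are invented for this sketch.
from multiprocessing import Pool

def cpu_bound(n):
    return sum(i * i for i in range(n))

if __name__ == '__main__':
    with Pool() as pool:  # defaults to os.cpu_count() worker processes
        print(pool.map(cpu_bound, [10**6] * 4))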
| null |
Web-Autotest-Python/Python-Selenium/python多线程-第10章/多进程技术03/multiprocessing模块01.py
|
multiprocessing模块01.py
|
py
| 1,876 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.ctime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.ctime",
"line_number": 32,
"usage_type": "call"
}
] |
148665457
|
# SWEA 1249 "Supply Route": cheapest path from (0,0) to (N-1,N-1) on a digit
# grid, relaxed with a deque (SPFA-style: a cell is re-queued whenever a
# cheaper route to it is found).
from collections import deque
for ir in range(int(input())):
    N = int(input())
    A = []
    moves = [[-1, 0], [0, 1], [1, 0], [0, -1]]  # up, right, down, left
    for i in range(N):
        A.append([int(ch) for ch in input()])
    # best known cost to reach each cell; 90000 serves as "infinity"
    A_cal = [[90000] * N for _ in range(N)]
    A_cal[0][0] = 0  # entering the start cell costs nothing
    queue = deque()
    queue.append([0, 0])
    while queue:
        vx, vy = queue.popleft()
        cost = A_cal[vx][vy]
        for d in moves:
            cx, cy = vx + d[0], vy + d[1]
            if 0 <= cx < N and 0 <= cy < N:
                if cost + A[cx][cy] < A_cal[cx][cy]:
                    A_cal[cx][cy] = cost + A[cx][cy]
                    queue.append([cx, cy])
    print('#{} {}'.format(ir + 1, A_cal[N-1][N-1]))
| null |
lecture/algorithm/problem/1249. 보급로.py
|
1249. 보급로.py
|
py
| 869 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.deque",
"line_number": 18,
"usage_type": "call"
}
] |
602102204
|
#
# Copyright (c) 2012, 2013, 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import codecs
import git
import os
import subprocess
from git_upstream.lib.utils import GitMixin
from git_upstream.log import LogDedentMixin
from git_upstream import PROJECT_ROOT
REBASE_EDITOR_SCRIPT = "rebase-editor"
# ensure name of file will match any naming filters used by editors to
# enable syntax highlighting
REBASE_EDITOR_TODO = "git-upstream/git-rebase-todo"
class RebaseEditor(GitMixin, LogDedentMixin):
def __init__(self, finish_args, interactive=False, *args, **kwargs):
self._interactive = interactive
super(RebaseEditor, self).__init__(*args, **kwargs)
self._editor = REBASE_EDITOR_SCRIPT
        # the interactive switch determines whether the script handed to
        # git-rebase as its editor will, in turn, exec an editor for the
        # user to look through the instructions before rebase applies them
if interactive == 'debug':
self.log.debug("Enabling interactive mode for rebase")
self._editor = "%s --interactive" % self.editor
self.finish_args = finish_args
@property
def editor(self):
return self._editor
def _todo_epilogue(self):
if git.Git().version_info < (2, 6, 0):
resource = 'todo_epilogue_1_7_5.txt'
else:
resource = 'todo_epilogue_2_6_0.txt'
with open('%s/resources/%s' % (PROJECT_ROOT, resource),
'r') as epilogue:
return epilogue.read()
def _write_todo(self, commits, *args, **kwargs):
todo_file = os.path.join(self.repo.git_dir, REBASE_EDITOR_TODO)
if os.path.exists(todo_file):
os.remove(todo_file)
if not os.path.exists(os.path.dirname(todo_file)):
os.mkdir(os.path.dirname(todo_file))
# see if onto is set in the args or kwargs
onto = kwargs.get('onto', None)
for idx, arg in enumerate(args):
if arg.startswith("--onto"):
# either onto is after the option in this arg, or it's the
# next arg, or not providing it is an exception
onto = arg[7:] or args[idx + 1]
break
root = None
with codecs.open(todo_file, "w", "utf-8") as todo:
for commit in commits:
if not root:
root = commit.parents[0]
subject = commit.message.splitlines()[0]
todo.write("pick %s %s\n" % (self._shorten(commit), subject))
# if root isn't set at this point, then there were no commits
if not root:
todo.write("noop\n")
todo.write(self._todo_epilogue() %
{'shortrevisions': "%s..%s" % (self._shorten(root),
self._shorten(commit)),
'shortonto': self._shorten(onto or root)})
return todo_file
def _insert_exec_to_todo(self):
if not self.finish_args:
# no need to insert, as asked not to perform a finish/merge
return
todo_file = os.path.join(self.repo.git_dir, REBASE_EDITOR_TODO)
exec_line = "exec %s\n" % " ".join(self.finish_args)
insn_data = None
with codecs.open(todo_file, "r", "utf-8") as todo:
insn_data = todo.readlines()
# Cannot just append to file, as rebase appears to cut off
# after the second blank line in a row is encountered.
# Need to find the last instruction and insert afterwards,
# or if we find noop replace.
last = 0
for idx, line in enumerate(insn_data):
# comment line - ignore
if line.startswith("#"):
continue
# found noop - just replace
if line.rstrip() == "noop":
insn_data[idx] = exec_line
break
# not an empty line
if line.rstrip() != "":
last = idx
else:
# didn't break so need to insert after last instruction
insn_data.insert(last + 1, exec_line)
# replace contents to include exec
try:
todo = codecs.open(todo_file, "w", "utf-8")
todo.writelines(insn_data)
# ensure the filesystem has the correct contents
todo.stream.flush()
os.fsync(todo.stream.fileno())
finally:
todo.close()
def _shorten(self, commit):
if not commit:
return "<none>"
return self.git.rev_parse(commit, short=True)
def _set_editor(self, editor):
env = os.environ.copy()
        # if git is new enough, we can edit the sequence without overriding
        # the editor, which allows rebase to call the correct editor if it
        # reaches a 'reword' command before it has exited for the first time;
        # otherwise git-upstream's custom editor will be executed with the
        # path to a commit message as an argument and must be able to call
        # the preferred user editor instead
if self.git.version_info >= (1, 7, 8):
env['GIT_SEQUENCE_EDITOR'] = editor
else:
env['GIT_UPSTREAM_GIT_EDITOR'] = self.git_editor
env['GIT_EDITOR'] = editor
return env
def cleanup(self):
todo_file = os.path.join(self.repo.git_dir, REBASE_EDITOR_TODO)
if os.path.exists(todo_file):
os.remove(todo_file)
def run(self, commits, *args, **kwargs):
"""
        Reads the given list of commits and constructs the instruction
        file to be used by rebase.
Will spawn an editor if the constructor was told to be interactive.
Additional arguments *args and **kwargs are to be passed to 'git
rebase'.
"""
todo_file = self._write_todo(commits, *args, **kwargs)
if self._interactive:
# spawn the editor
# It is not safe to redirect I/O channels as most editors will
# be expecting that I/O is from/to proper terminal. YMMV
user_editor = self.git_sequence_editor or self.git_editor
status = subprocess.call("%s %s" % (user_editor, todo_file),
shell=True)
if status != 0:
return status, None, "Editor returned non-zero exit code"
editor = "%s %s" % (self.editor, todo_file)
environ = self._set_editor(editor)
cmd = ['git', 'rebase', '--interactive']
cmd.extend(self.git.transform_kwargs(**kwargs))
cmd.extend(args)
# ensure that the finish will always be called
self._insert_exec_to_todo()
mode = os.environ.get('TEST_GIT_UPSTREAM_REBASE_EDITOR', "")
if mode.lower() == "debug":
# In general it's not recommended to run rebase in direct
# interactive mode because it's not possible to capture the
# stdout/stderr, but sometimes it's useful to allow it for
# debugging to check the final result.
try:
return subprocess.call(cmd), None, None
finally:
self.cleanup()
elif not self._interactive:
# If in non-interactive mode use subprocess instead of exec
#
# This ensures that if no conflicts occur, that the calling
# git-upstream process will be able to switch the current
# branch after the git-rebase subprocess exits. This is not
# possible when using exec to have git-rebase replace the
# existing process. Since git-rebase performs checks once
# it is completed running the instructions (todo file),
# changing the current branch checked out in the git
# repository via the final instruction (calling
# `git-upstream import --finish ...`) results in git-rebase
# exiting with an exception.
#
# For interactive mode it is impossible to perform a rebase
# via subprocess and have it correctly attach an editor to
# the console for users to edit/reword commits. The
# consequence of using exec to support interactive usage
# prevents correctly switching final branch to anything other
# than the branch that git-rebase was started on (which will
# be the import branch).
#
# As interactive mode involves user intervention it seems a
# reasonable compromise to require manual switch of branches
# after being finished until such time that an alternative
# solution can be found.
try:
return 0, subprocess.check_output(
cmd, stderr=subprocess.STDOUT, env=environ), None
except subprocess.CalledProcessError as e:
return e.returncode, e.output, None
finally:
self.cleanup()
else:
cmd.append(environ)
os.execlpe('git', *cmd)
@property
def git_sequence_editor(self):
return os.environ.get('GIT_SEQUENCE_EDITOR',
self.git.config("sequence.editor",
with_exceptions=False))
@property
def git_editor(self):
return os.environ.get("GIT_EDITOR",
self.git.var("GIT_EDITOR",
with_exceptions=False))
| null |
git_upstream/lib/rebaseeditor.py
|
rebaseeditor.py
|
py
| 10,175 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "git_upstream.lib.utils.GitMixin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "git_upstream.log.LogDedentMixin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "git.Git",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "git_upstream.PROJECT_ROOT",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "os.fsync",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.environ.copy",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "subprocess.call",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "os.execlpe",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 259,
"usage_type": "attribute"
}
] |
615205495
|
import os
import json
import csv
import boto3
def save_to_dynamodb(db_table, item):
db_table.put_item(
Item=item
)
def lambda_handler(event, context):
# extract info from event
bucket = event['Records'][0]['s3']['bucket']['name']
s3_object = event['Records'][0]['s3']['object']['key']
    # temporary file to store the CSV downloaded from S3
tmp_csv_file = '/tmp/' + s3_object
# Download object from S3
s3 = boto3.client('s3')
s3.download_file(bucket, s3_object, tmp_csv_file)
# creating dynamodb instance
dynamodb_table = os.environ['dynamodb_table_name']
db_table = boto3.resource('dynamodb').Table(dynamodb_table)
    # iterate through the CSV and insert every row;
    # the CSV may carry arbitrary columns, but the first two must be the
    # table's partition key (PK) and sort key (SK), respectively
with open(tmp_csv_file, 'r', encoding='utf8') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
save_to_dynamodb(db_table, row)
return {
'statusCode': 200,
'body': json.dumps('CSV data successfully added to DynamoDB!')
}
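
def save_rows_batched(db_table, rows):
    # Optional alternative to one put_item call per row (a sketch, not wired
    # into lambda_handler above): boto3's Table.batch_writer buffers writes
    # and flushes them in batches of up to 25 items, which is markedly
    # faster for large CSVs.
    with db_table.batch_writer() as batch:
        for row in rows:
            batch.put_item(Item=row)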
| null |
Serverless_Application/csv_processor.py
|
csv_processor.py
|
py
| 1,006 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.client",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "boto3.resource",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 39,
"usage_type": "call"
}
] |
605256556
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 16:32:30 2021
@author: Spyro
"""
# import main Flask class and request object
from flask import Flask, request
from flask_cors import CORS, cross_origin
import api_calls
# create the Flask app
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/getTreeData', methods=["GET"])
def getTreeData():
result_dict_cp = {"data":[{'item': 'GM12878', 'type': '', 'belongsTo': '', 'checked': False, 'children': [{'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}, {'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}, {'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}, {'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}, {'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}, {'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}, {'item': 'ATF2_ENCFF210HTZ', 'type': 'tf', 'belongsTo': 'GM12878', 'checked': False, 'children': []}]}]}
#return api_calls.get_biosource_list_for_tree()
return result_dict_cp
@app.route('/getGraphPaths', methods=["POST"])
def getGraphPaths():
request_data = request.get_json()
#data = {"data": request_data, "hey": "there"}
return api_calls.getPathList(request_data)
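# Example request for /getGraphPaths (the payload shape shown is assumed --
# the real schema is whatever api_calls.getPathList expects):
#   curl -X POST http://localhost:5000/getGraphPaths \
#        -H "Content-Type: application/json" \
#        -d '{"nodes": ["GM12878"]}'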
@app.route('/json-example')
def json_example():
return 'JSON Object Example'
if __name__ == '__main__':
# run app in debug mode on port 5000
app.run(debug=True, port=5000)
| null |
bin/scripts/visualization_app_api_start.py
|
visualization_app_api_start.py
|
py
| 1,729 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "api_calls.getPathList",
"line_number": 29,
"usage_type": "call"
}
] |
223887987
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 01:19:25 2021
@author: ma7mo
"""
# Data Preprocessing Tools
# importing libs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# importing dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[: , :-1].values
y = dataset.iloc[: , -1].values
# taking care of missing data
# replace it by mean
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values= np.nan, strategy='mean')
imputer = imputer.fit(X[ :, 1:3])
X[: , 1:3] = imputer.transform(X[ : , 1:3])
print('this is X after fill the nan by the mean\n',X,'\n','-'*50)
# encoding categorial data
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
# you can fit and then transform separately, or do both at once with fit_transform
X[: ,0]=labelencoder_X.fit_transform(X[: , 0])
print('this is X after encode categorial data (country)\n',X,'\n','-'*50)
# the countries are now encoded as integers, but those arbitrary numbers would
# impose a false ordering on the data; instead, give each category its own
# column holding 0/1 (one-hot encoding)
from sklearn.preprocessing import OneHotEncoder
# here , we determine the column ([0])
onehotencoder = OneHotEncoder(categorical_features=[0])
X = onehotencoder.fit_transform(X).toarray()
print('this is X after hot encode categorial data (country)\n',X,'\n','-'*50)
# y encoding (yes/no) > (0/1)
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
print('this is y after encoding\n',y,'\n','-'*50)
# splitting dataset into the training and test
from sklearn.model_selection import train_test_split
# random_state=42 is a common convention for reproducible splits
# to disable the randomness entirely: random_state=0, shuffle=False, stratify=None
X_train , X_test ,y_train,y_test = train_test_split(X,y,test_size=0.2 , random_state =42)
# feature scaling
# the Salary column holds much larger numbers than Age, so Salary would dominate
# any distance-based computation while Age would barely register
# bring the features onto a comparable scale
# two common approaches: standardisation and normalisation
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
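
# --- optional sketch: the modern scikit-learn (>=0.22) equivalents ---
# Imputer and OneHotEncoder(categorical_features=...) used above were removed
# from recent scikit-learn; assuming the same X layout (country in column 0,
# numeric data in columns 1-2), a current version of the same steps would be:
#
# from sklearn.impute import SimpleImputer
# from sklearn.compose import ColumnTransformer
# from sklearn.preprocessing import OneHotEncoder
# imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# X[:, 1:3] = imputer.fit_transform(X[:, 1:3])
# ct = ColumnTransformer([('onehot', OneHotEncoder(), [0])], remainder='passthrough')
# X = ct.fit_transform(X)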
| null |
P14-Part1-Data-Preprocessing/Section 3 - Data Preprocessing in Python/Python/my_data_preprocessing_tools.py
|
my_data_preprocessing_tools.py
|
py
| 2,193 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Imputer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 59,
"usage_type": "call"
}
] |
240295490
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import time
from numpy import pi, cos, sin, linspace, roll, zeros_like
from bokeh.plotting import cursession, figure, show, output_server
from bokeh.models import GlyphRenderer
N = 50 + 1
r_base = 8
theta = linspace(0, 2*pi, N)
r_x = linspace(0, 6*pi, N-1)
rmin = r_base - cos(r_x) - 1
rmax = r_base + sin(r_x) + 1
colors = ["#FFFFCC", "#C7E9B4", "#7FCDBB", "#41B6C4", "#2C7FB8", "#253494", "#2C7FB8", "#41B6C4", "#7FCDBB", "#C7E9B4"] * 5
cx = cy = zeros_like(rmin)
output_server("animated")
p = figure(x_range=[-11, 11], y_range=[-11, 11])
p.annular_wedge(
cx, cy, rmin, rmax, theta[:-1], theta[1:],
inner_radius_units="data",
outer_radius_units="data",
fill_color = colors,
line_color="black",
)
show(p)
renderer = p.select(dict(type=GlyphRenderer))
ds = renderer[0].data_source
while True:
rmin = ds.data["inner_radius"]
rmin = roll(rmin, 1)
ds.data["inner_radius"] = rmin
rmax = ds.data["outer_radius"]
rmax = roll(rmax, -1)
ds.data["outer_radius"] = rmax
cursession().store_objects(ds)
time.sleep(.10)
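# --- note: output_server/cursession were removed from modern Bokeh ---
# A rough present-day equivalent is a Bokeh server app run with `bokeh serve`;
# a hedged sketch of the update loop above in that style:
#
# from bokeh.io import curdoc
# def update():
#     ds.data["inner_radius"] = roll(ds.data["inner_radius"], 1)
#     ds.data["outer_radius"] = roll(ds.data["outer_radius"], -1)
# curdoc().add_root(p)
# curdoc().add_periodic_callback(update, 100)  # period in milliseconds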
| null |
examples/plotting/server/animated.py
|
animated.py
|
py
| 1,161 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.cos",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.output_server",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.figure",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.show",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bokeh.models.GlyphRenderer",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.roll",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.cursession",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 50,
"usage_type": "call"
}
] |
586075123
|
from PIL import Image, ImageDraw, ImageChops, ImageStat
import argparse
import os
from functools import partial, reduce
from random import random, randint
from time import process_time
import cairo
import numpy
import datetime
# The Genetic Algorithm DNA strand is a tuple of polygons, each polygon holding
# its vertices followed by a colour of length 4 (red, green, blue, alpha).
# Conceptually a strand starts like this:
# (((1,1),(40,40),(1,40),(255,0,0,255)), ...)
# Note: the code below actually stores each gene flat, as a list of
# vertices*2 + 4 floats normalised to [0, 1] (see gene()).
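# For example, with vertices=3 a gene is 3*2 + 4 = 10 floats -- three (x, y)
# pairs followed by (r, g, b, a); hypothetical values:
#   [0.1, 0.1, 0.9, 0.9, 0.1, 0.9, 0.8, 0.2, 0.2, 0.5]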
def breed(first, second, config):
# Should this handle variable length DNA strands?
# This would only require checking against the smaller length.
if config.randominheritance:
index = randint(0, len(first) - 1)
else:
# TODO: This should randomly (but evenly) choose genes from both parents.
index = len(first) // 2
# Should this only return one candidate? Or should it return both possible candidates?
return first[:index] + second[index:]
def chromosome_length(candidate, config):
# All polygon's have the same number of vertices
# They have the same number of colours in the tuples
return len(candidate[0][0]) * len(candidate) * 4
def mutate_value(value, config):
if random() > config.probability:
return value
value += random() * config.amount * 2 - config.amount
if value > 1:
value = 1
elif value < 0:
value = 0
return value
def mutate_gene(gene, config):
value_map = partial(mutate_value, config=config)
return tuple(map(value_map, gene))
def mutate(chromosome, config):
mutate_map = partial(mutate_gene, config=config)
return tuple(map(mutate_map, chromosome))
#@profile
def render_image(chromosome, config, size=None):
if size is None:
size = config.width, config.height
width, height = size
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
ctx.scale(width, height)
for polygon in chromosome:
red, blue, green, alpha = polygon[-4:]
# vertices = extract_vertices(polygon, config, size)
ctx.new_path()
ctx.move_to(polygon[0], polygon[1])
for i in range(2, len(polygon) - 4, 2):
ctx.line_to(polygon[i], polygon[i+1])
ctx.close_path()
ctx.set_source_rgba(red, green, blue, alpha)
ctx.fill()
return surface
def cairo_to_numpy(surface, width, height):
buf = surface.get_data()
a = numpy.frombuffer(buf, numpy.uint8)
a.shape = (width, height, 4)
a[:, :, 2] = 255
return a
def convert_cairo_to_pil(surface):
cairo_format = surface.get_format()
if cairo_format == cairo.FORMAT_ARGB32:
pil_mode = 'RGB'
# Cairo has ARGB. Convert this to RGB for PIL which supports only RGB or
# RGBA.
argbArray = numpy.frombuffer(surface.get_data(), numpy.uint8).reshape( -1, 4 )
rgbArray = argbArray[ :, 2::-1 ]
        pilData = rgbArray.reshape( -1 ).tobytes()  # raw RGB bytes for PIL (tostring() was removed from numpy)
else:
raise ValueError( 'Unsupported cairo format: %d' % cairo_format )
pil_image = Image.frombuffer( pil_mode,
( surface.get_width(), surface.get_height() ), pilData, "raw",
pil_mode, 0, 1 )
pil_image = pil_image.convert('RGB')
return pil_image
#@profile
def fitness(chromosome, config):
# This is the slowest function because of the number of times it is called.
img = convert_cairo_to_pil(render_image(chromosome, config))
difference = ImageChops.difference(config.image, img)
stats = ImageStat.Stat(difference)
if not config.linear:
return sum(stats.sum2)
else:
return sum(stats.sum)
#@profile
def sort_chromosomes(chromosomes, config):
fitness_config = partial(fitness, config=config)
# This can easily be made parallel. Would the speed up be faster than the piping of objects?
# It would be faster on linux where child processes can clone the memory of the parent.
graded = map(fitness_config, chromosomes)
c = zip(chromosomes, graded)
c = sorted(c, key=lambda x: x[1])
return list(map(lambda x: x[0], c))
#@profile
def evolve(chromosomes, config):
# TODO: This could be improved by sorting the chromosomes at the end before returning them.
# TODO: This needs to be modified to handle asexual reproduction.
# This would remove the need to sort them when saving the image.
# chromosomes = sort_chromosomes(chromosomes, config)
retain_length = int(len(chromosomes) * config.cutoff)
parents = chromosomes[:retain_length]
# Randomly add other individuals to promote genetic diversity.
# TODO: Add a command line argument for this.
'''for individual in c[retain_length:]:
# random_select is not defined anywhere.
        # Make it a command line option and give it an explanation
if random_select > random():
parents.append(individual[0])
'''
parents_length = len(parents)
children = []
while len(children) < config.population:
# Should this be generalised to n number of parents?
male = randint(0, parents_length - 1)
female = randint(0, parents_length - 1)
if male != female or config.population == 1:
male = parents[male]
female = parents[female]
child = breed(male, female, config)
child = mutate(child, config)
children.append(child)
if config.fit:
children.extend(parents)
# Should this be restricted to the population cap?
return sort_chromosomes(children, config)
def vertices(config):
return [[random(), random()] for _ in range(0, config.vertices)]
def colour(config):
return [random() for _ in range(0, 4)]
def gene(config):
return [random() for _ in range(config.vertices * 2 + 4)]
def chromosome(config):
return [gene(config) for _ in range(config.polygons)]
def population(config):
return [chromosome(config) for _ in range(config.population)]
def genetic_algorithm(config):
p = population(config)
# Initially sort the chromosomes
p = sort_chromosomes(p, config)
if not os.path.exists(config.output):
os.makedirs(config.output)
i = 1
saved = config.saved
width = config.saveSize
height = int(config.height / config.width * width)
start = process_time()
while True:
#for i in range(2):
p = evolve(p, config)
if i % saved == 0:
#p = sort_chromosomes(p, config)
output = render_image(p[0], config, (width, height))
output.write_to_png(config.output + str(i) + ".png")
print("\rGeneration:", i, "Total Time:", datetime.timedelta(seconds=(process_time() - start)),
"s Time per generation:", datetime.timedelta(seconds=(process_time() - start)/i), end="")
i += 1
def file_exists(x):
"""
'Type' for argparse - checks that file exists, but does not open.
"""
if not os.path.exists(x):
        raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
class Config(object):
def __init__(self):
pass
def convert_arguments_into_config(arguments):
# Construct a config class and set all of the arguments as variables of the instance of the class.
# Some other settings and information will be added to the object later.
# These include the dimensions of the picture.
# This config will be returned and then passed as a parameter to almost all functions.
config = Config()
config.file = arguments.file
config.population = arguments.population
config.cutoff = arguments.cutoff
config.probability = arguments.chance
config.amount = arguments.amount
config.randominheritance = arguments.randominheritance
config.linear = arguments.linear
config.fit = arguments.fit
config.polygons = arguments.polygons
config.vertices = arguments.vertices
config.lines = arguments.lines
config.threads = arguments.threads
config.size = arguments.size
config.output = arguments.output
config.saved = arguments.save
config.saveSize = arguments.saveSize
return config
def scale_image(config):
if config.size != 0:
factor = config.size / config.original_width
config.width = config.size
config.height = int(config.original_height * factor)
else:
config.width = config.original_width
config.height = config.original_height
size = config.width, config.height
im = config.original_image.convert("RGB")
im.thumbnail(size, Image.ANTIALIAS)
config.image = im
pixels = im
config.pixels = pixels.load()
# config.numpy = numpy.asarray(im, dtype="int32")
return config
def parse_arguments():
# TODO: Not all of these arguments have been implemented yet.
# TODO: Some of these arguments needs single letter values.
parser = argparse.ArgumentParser(
description=("This program will grow an image out of polygons to approximate the input picture. "
"Note: Not all of the following arguments have been implemented."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file", help="The path to the picture you want to grow", required=True,
type=file_exists)
parser.add_argument("-o", "--output", help="The output directory that the images will be saved to.", required=True)
parser.add_argument("--save", help="Save every x images", type=int, default=100)
parser.add_argument("--saveSize", help="The resoltution to save the image as.", type=int, default=0)
parser.add_argument("-p", "--population",
help=("The number of individuals in the population. "
"This determines the size of the gene pool. "
"If the population is 1, then reproduction occurs asexually."),
type=int, default=50)
parser.add_argument("-c", "--cutoff",
help=("This determines the number of individuals from a given generation that get selected "
"to breed the next. A lower percentage indicates more selective breeding."),
type=float, default=0.15)
parser.add_argument("--chance",
help=("The chance that a string of DNA will mutate during breeding. "
"A higher chance represents less accurate DNA replication."),
type=float, default=0.01)
parser.add_argument("--amount",
help="The amount of random data that will be introduced to a string of DNA during mutation.",
type=float, default=0.1)
# Maybe change this to an inheritance argument and give a choice of two values. Random and Equal.
parser.add_argument("--randominheritance",
help=("If enabled, genes are inherited randomly from mother and father, "
"as opposed to inheriting an even number from both"),
action="store_true")
# NOTE: I don't think it is exponential, but I am just copying the hints from the website.
parser.add_argument("-l", "--linear",
help=("This determines if the fitness algorithm is linear or exponential. "
"By default the fitness function is exponential. "
"Adding this argument will change it to linear."),
action="store_true")
parser.add_argument("--fit",
help=("If enabled, the lifespan of an individual is determined by its fitness, "
"with the strongest individuals from each generation surviving."),
action="store_true")
parser.add_argument("--polygons",
help=("This determines the number of visible polygons which are used in the drawing. "
"A higher number leads to more detailed drawings."),
type=int, default=125)
parser.add_argument("--vertices",
help=("The number of sides the each polygon has. "
"For example, setting this to 3 will create triangles."),
type=int, default=3)
parser.add_argument("--lines",
help=("This determines if the polygons are filled. "
"By default the polygons are filled with a solid colour. "
"Activating this will only draw lines."),
action="store_true")
parser.add_argument("-t", "--threads",
help=("This determines the number of threads that are used. "
"Setting this higher than the number of CPU cores will cause a slowdown. "
"Setting it 0 will cause it to use all the cores."),
type=int, default=0)
parser.add_argument("-s", "--size",
help=("This sets the internal resolution of the image. "
"The image will be scaled so this value is the horizontal resolution."
"Setting this to a higher value will increase the accuracy, but cause a large slow down"
"Setting this to 0 will use the native resolution of the image"),
type=int, default=0)
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
config = convert_arguments_into_config(args)
img = Image.open(args.file)
config.original_image = img
config.original_width, config.original_height = img.size
if config.saveSize == 0:
if config.size == 0:
config.saveSize = config.original_width
else:
config.saveSize = config.size
scale_image(config)
genetic_algorithm(config)
| null |
grow.py
|
grow.py
|
py
| 14,158 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.randint",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cairo.ImageSurface",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cairo.FORMAT_ARGB32",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "cairo.Context",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "cairo.FORMAT_ARGB32",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.frombuffer",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.frombuffer",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "PIL.ImageChops.difference",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "PIL.ImageChops",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "PIL.ImageStat.Stat",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "PIL.ImageStat",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "time.process_time",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "time.process_time",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "time.process_time",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentError",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 303,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 370,
"usage_type": "name"
}
] |
467653873
|
import shutil
from unittest.mock import Mock
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from butterfree.configs import environment
from butterfree.constants import DataType
from butterfree.constants.columns import TIMESTAMP_COLUMN
from butterfree.extract import Source
from butterfree.extract.readers import TableReader
from butterfree.load import Sink
from butterfree.load.writers import HistoricalFeatureStoreWriter
from butterfree.pipelines.feature_set_pipeline import FeatureSetPipeline
from butterfree.testing.dataframe import assert_dataframe_equality
from butterfree.transform import FeatureSet
from butterfree.transform.features import Feature, KeyFeature, TimestampFeature
from butterfree.transform.transformations import CustomTransform, SparkFunctionTransform
from butterfree.transform.utils import Function
def create_temp_view(dataframe: DataFrame, name):
dataframe.createOrReplaceTempView(name)
def create_db_and_table(spark, table_reader_id, table_reader_db, table_reader_table):
spark.sql(f"create database if not exists {table_reader_db}")
spark.sql(f"use {table_reader_db}")
spark.sql(
f"create table if not exists {table_reader_db}.{table_reader_table} " # noqa
f"as select * from {table_reader_id}" # noqa
)
def divide(df, fs, column1, column2):
name = fs.get_output_columns()[0]
df = df.withColumn(name, F.col(column1) / F.col(column2))
return df
class TestFeatureSetPipeline:
def test_feature_set_pipeline(
self, mocked_df, spark_session, fixed_windows_output_feature_set_dataframe
):
# arrange
table_reader_id = "a_source"
table_reader_table = "table"
table_reader_db = environment.get_variable("FEATURE_STORE_HISTORICAL_DATABASE")
create_temp_view(dataframe=mocked_df, name=table_reader_id)
create_db_and_table(
spark=spark_session,
table_reader_id=table_reader_id,
table_reader_db=table_reader_db,
table_reader_table=table_reader_table,
)
dbconfig = Mock()
dbconfig.get_options = Mock(
return_value={
"mode": "overwrite",
"format_": "parquet",
"path": "test_folder/historical/entity/feature_set",
}
)
# act
test_pipeline = FeatureSetPipeline(
source=Source(
readers=[
TableReader(
id=table_reader_id,
database=table_reader_db,
table=table_reader_table,
),
],
query=f"select * from {table_reader_id} ", # noqa
),
feature_set=FeatureSet(
name="feature_set",
entity="entity",
description="description",
features=[
Feature(
name="feature1",
description="test",
transformation=SparkFunctionTransform(
functions=[
Function(F.avg, DataType.FLOAT),
Function(F.stddev_pop, DataType.FLOAT),
],
).with_window(
partition_by="id",
order_by=TIMESTAMP_COLUMN,
mode="fixed_windows",
window_definition=["2 minutes", "15 minutes"],
),
),
Feature(
name="divided_feature",
description="unit test",
dtype=DataType.FLOAT,
transformation=CustomTransform(
transformer=divide, column1="feature1", column2="feature2",
),
),
],
keys=[
KeyFeature(
name="id",
description="The user's Main ID or device ID",
dtype=DataType.INTEGER,
)
],
timestamp=TimestampFeature(),
),
sink=Sink(writers=[HistoricalFeatureStoreWriter(db_config=dbconfig)],),
)
test_pipeline.run()
# assert
path = dbconfig.get_options("historical/entity/feature_set").get("path")
df = spark_session.read.parquet(path).orderBy(TIMESTAMP_COLUMN)
target_df = fixed_windows_output_feature_set_dataframe.orderBy(
test_pipeline.feature_set.timestamp_column
)
# assert
assert_dataframe_equality(df, target_df)
# tear down
shutil.rmtree("test_folder")
| null |
tests/integration/butterfree/pipelines/test_feature_set_pipeline.py
|
test_feature_set_pipeline.py
|
py
| 4,873 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyspark.sql.DataFrame",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "butterfree.configs.environment.get_variable",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "butterfree.configs.environment",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "butterfree.pipelines.feature_set_pipeline.FeatureSetPipeline",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "butterfree.extract.Source",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "butterfree.extract.readers.TableReader",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "butterfree.transform.FeatureSet",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "butterfree.transform.features.Feature",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "butterfree.transform.transformations.SparkFunctionTransform",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "butterfree.transform.utils.Function",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.avg",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "butterfree.constants.DataType.FLOAT",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "butterfree.constants.DataType",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "butterfree.transform.utils.Function",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.stddev_pop",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "butterfree.constants.DataType.FLOAT",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "butterfree.constants.DataType",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "butterfree.constants.columns.TIMESTAMP_COLUMN",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "butterfree.transform.features.Feature",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "butterfree.constants.DataType.FLOAT",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "butterfree.constants.DataType",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "butterfree.transform.transformations.CustomTransform",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "butterfree.transform.features.KeyFeature",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "butterfree.constants.DataType.INTEGER",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "butterfree.constants.DataType",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "butterfree.transform.features.TimestampFeature",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "butterfree.load.Sink",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "butterfree.load.writers.HistoricalFeatureStoreWriter",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "butterfree.constants.columns.TIMESTAMP_COLUMN",
"line_number": 121,
"usage_type": "argument"
},
{
"api_name": "butterfree.testing.dataframe.assert_dataframe_equality",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 131,
"usage_type": "call"
}
] |
257914739
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import pandas as pd
import urllib
from datetime import datetime
import lxml
import json
import pathlib
# ########## fetch Daum trending keywords ################
# dlist = [] ## holds the Daum keywords
# html = requests.get("https://www.daum.net").text
# soup = BeautifulSoup(html,'html.parser')
# title_list = soup.select(".list_mini .rank_cont .link_issue")
# ranking = soup.select(".list_mini .rank_cont .ir_wa")
# del dlist[:]
# for top in title_list:
# dlist.append(top.text)
# ##############################################
# ########### fetch Naver trending keywords #############
# nlist = [] ## holds the Naver keywords
# html = requests.get('https://www.naver.com/').text
# soup = BeautifulSoup(html, 'html.parser')
# title_list = soup.select('.PM_CL_realtimeKeyword_rolling span[class*=ah_k]')
# nlist20 = []
# for i in title_list :
# nlist20.append(i.get_text())
# nlist = nlist20[:10]
# ##############################################
# ########## combined keyword list #################
# tlist = dlist + nlist
# ##############################################
dlist = ["가나", "다라", "조국"]
import pickle
import os
from datetime import datetime
def make_folder(folder_name) :
if not os.path.isdir(folder_name) :
os.mkdir(folder_name)
datetime = datetime.now()  # note: rebinds the name 'datetime' to an instance, shadowing the class
day = "%02d%02d%02d" % (datetime.year, datetime.month, datetime.day)
today = day[2:8]
CurrentTime = "%02d%02d" % (datetime.hour, datetime.minute)
root_dir = "C:/after"
day_dir = root_dir + "/" + today
time_dir = day_dir + "/" + CurrentTime
make_folder(day_dir)
make_folder(time_dir)
for nums, keyword in enumerate(dlist):  # enumerate gives each keyword its own numbered folder (D_K_01, D_K_02, ...)
    key_dir = "D_K_%02d" % (nums + 1)
    last_dir = time_dir + "/" + key_dir
    make_folder(last_dir)
# DaumList = [] ## holds the Daum news
# del DaumList[:]
    furl = "https://search.daum.net/search?w=news&sort=recency&q=" # the URL is split so the page number and keyword can be spliced in
    surl = "&cluster=n&DA=STC&s=NS&a=STCF&dc=STC&pg=1&r=1&p="
    lurl = "&rc=1&at=more&sd=&ed=&period="
    # for keyword in dlist:
    daumitem=[]
    # each results page carries 10 articles, so fetch 3 pages for 30 in total
    for i in range(3):
        # assemble the full search URL
        url = requests.get(furl + keyword + surl + str(i) + lurl).text
        # parse the result page
        soup = BeautifulSoup(url,'html.parser')
        # collect the article links
        urllink = soup.select("a[class *= f_link_b]")
        # collect the article titles
        urlname = soup.select(".f_link_b")
        # append each (keyword, title, link) record
        for list,list2 in zip(urllink,urlname):
            daumitem.append({"keyword" : keyword,"title" : list2.text , "link" : list.get('href')})
    # DaumList.append({"keyword" : keyword , "items" : daumitem})
    # f = open(str(last_dir) + "/" + "save.txt", "w")
    # f.write(daumitem)
    #
    # f.write(" ")
    # f.close
    file=open(str(last_dir) + "/" + "save.html","wb")
    pickle.dump(daumitem, file)
    file.close()
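# --- optional sketch: reading one pickled result file back ---
# (the path below is illustrative -- any save.html written above works)
# with open(time_dir + "/D_K_01/save.html", "rb") as f:
#     items = pickle.load(f)   # list of {"keyword", "title", "link"} dicts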
# for i in range(len(nlist)) :
# key_dir ="N_K_%02d" % (i+1)
# make_folder(time_dir + "/" + key_dir)
###############################################################################################
########## search Daum news ##################
# DaumList = [] ## holds the Daum news
# furl = "https://search.daum.net/search?w=news&sort=recency&q=" # the URL is split so the page number and keyword can be spliced in
# surl = "&cluster=n&DA=STC&s=NS&a=STCF&dc=STC&pg=1&r=1&p="
# lurl = "&rc=1&at=more&sd=&ed=&period="
# del DaumList[:]
# for keyword in tlist:
# #declare daumitem inside the for loop so the list resets for each keyword
# daumitem=[]
# #each results page carries 10 articles, so fetch 3 pages for 30 in total
# for i in range(3):
# #assemble the full search URL
# url = requests.get(furl + keyword + surl + str(i) + lurl).text
# #parse the result page
# soup = BeautifulSoup(url,'html.parser')
# #collect the article links
# urllink = soup.select("a[class *= f_link_b]")
# #collect the article titles
# urlname = soup.select(".f_link_b")
# #append each (title, link) record
# for list,list2 in zip(urllink,urlname):
# daumitem.append({"title" : list2.text , "link" : list.get('href')})
# DaumList.append({"keyword" : keyword , "items" : daumitem})
# #############################################
# ########## save to a file #########################
# PrintList = DaumList ## leave PrintList alone; switch to TotalList to test if needed
# with open('C:/after/out', 'w', encoding = "utf-8") as make_file:
# json.dump(PrintList, make_file, ensure_ascii = False, indent = "\t")
# ########## search Naver news ##########
# import os
# import sys
# import urllib.request
# client_id = "AcSs8vk1vXfmzpFkSX4h" ## Naver API client id
# client_secret = "WBwj2IuI0D" ## Naver API client secret
# listngo = tlist ## leave listngo alone; switch to tlist to test if needed
# NaverList = [] ## holds the Naver news
# for keyword in listngo : ## the iteration count could later be driven by total_title
# encText = urllib.parse.quote(keyword) ## the value could later be driven by total_title
# url = "https://openapi.naver.com/v1/search/news?query=" + encText + "&display=30&start=1&sort=sim" # display sets the number of articles
# request = urllib.request.Request(url)
# request.add_header("X-Naver-Client-Id",client_id)
# request.add_header("X-Naver-Client-Secret",client_secret)
# response = urllib.request.urlopen(request)
# rescode = response.getcode()
# if(rescode==200):
# response_body = response.read()
# jsonlist = json.loads(response_body.decode('utf-8'))['items']
# TitleLink = []
# for i in jsonlist:
# TitleLink.append( {'title' : i['title'], 'link' : i['link']} )
# else:
# print("Error Code:" + rescode)
# NaverList.append({"keyword" : keyword , "items" : TitleLink})
# ##############################################
# ###############################################################################################
# ########## save to a file #########################
# PrintList = TotalList ## leave PrintList alone; switch to TotalList to test if needed
# with open('C:/after/out', 'w', encoding = "utf-8") as make_file:
# json.dump(PrintList, make_file, ensure_ascii = False, indent = "\t")
| null |
crawling/chapter3/save2.py
|
save2.py
|
py
| 6,794 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.isdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.year",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.month",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.day",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.hour",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.minute",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 110,
"usage_type": "call"
}
] |
555534352
|
#vim: set fileencoding=utf-8
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext, Template
from django.template import Context, loader
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from ctlweb.models import Components, Interfaces, Programmer
from django.contrib.auth.models import User
from django.conf import settings
import util
def lists(request,
direct_interfaces=None,
indirect_interfaces=None,
s_components=None,
form=0):
"""
Die Methode stellt die äußere Form der Componentdarstellungsseiten dar.
"""
# form:
# 0 = Verzeichnis
# 1 = Suche
if "ajax" in request.GET and request.GET["ajax"] == "true":
return new_page(request, direct_interfaces, indirect_interfaces,
s_components, form)
v_user = request.user
dict_response = dict()
dict_response["user"] = v_user
dict_response["form"] = form
dict_response["post_data"] = request.POST.urlencode()
context = RequestContext(request, dict_response)
return render_to_response("components.html", context_instance=context)
def new_page(request,
direct_interfaces=None,
indirect_interfaces=None,
s_components=None,
form=0):
""" this method handles the ajax-requests to dynamically reloading the new
pages"""
# form:
# 0 = Verzeichnis
# 1 = Suche
v_user = request.user
dict_response = dict()
if direct_interfaces == None :
direct_interfaces = Interfaces.objects.none()
else :
direct_interfaces = direct_interfaces.order_by('name')
if indirect_interfaces == None :
indirect_interfaces = Interfaces.objects.none()
else :
for d in direct_interfaces:
indirect_interfaces = indirect_interfaces.exclude(name = d.name)
indirect_interfaces = indirect_interfaces.order_by('name')
if s_components == None :
s_components = Components.objects.none()
else :
s_components = s_components.order_by('names').distinct()
s_components = s_components.exclude(is_active=False)
if form == 0:
direct_interfaces = Interfaces.objects.all().order_by('name')
s_components = \
Components.objects.filter(is_active=True).order_by('names')
interface_page_range = settings.PAGINATION_PAGE_RANGE_INTERFACES
components_page_range = settings.PAGINATION_PAGE_RANGE_COMPONENTS
button_range = settings.PAGINATION_BUTTON_RANGE
interfaces = direct_interfaces | indirect_interfaces
# get active pagenumbers
inter_page = request.GET.get('di_page', 1)
if not inter_page == "last":
try:
inter_page = int(inter_page)
except ValueError:
inter_page = 1
comp_page = request.GET.get('di_co_page', 1)
if not comp_page == "last":
try:
comp_page = int(comp_page)
except ValueError:
comp_page = 1
s_comp_page = request.GET.get('co_page', 1)
if not s_comp_page == "last":
try:
s_comp_page = int(s_comp_page)
except ValueError:
s_comp_page = 1
for inter in interfaces:
inter.is_direct = (inter in direct_interfaces)
components = inter.components.all()
if not inter.is_direct:
for c in inter.components.all():
if c not in s_components:
components = components.exclude(pk=c.pk)
components = components.exclude(is_active=False)
components = components.order_by('names').distinct()
pn_comp = Paginator(components, components_page_range)
if comp_page == "last":
comp_page = pn_comp.num_pages
try:
paged_components = pn_comp.page(comp_page)
except (EmptyPage, InvalidPage):
paged_components = pn_comp.page(pn_comp.num_pages)
inter.paged_components = paged_components
inter.page_buttons = util.generate_page_buttons("di_co_button_",
pn_comp.num_pages, "di_co_page", comp_page, button_range)
pn_interfaces = Paginator(interfaces, interface_page_range)
if inter_page == "last":
inter_page = pn_interfaces.num_pages
try:
paged_interfaces = pn_interfaces.page(inter_page)
except (EmptyPage, InvalidPage):
paged_interfaces = pn_interfaces.page(pn_interfaces.num_pages)
interfaces_page_buttons = util.generate_page_buttons("di_button_",
pn_interfaces.num_pages, "di_page", inter_page, button_range)
pn_comp = Paginator(s_components, components_page_range)
if s_comp_page == "last":
s_comp_page = pn_comp.num_pages
try:
s_paged_components = pn_comp.page(s_comp_page)
except (EmptyPage, InvalidPage):
s_paged_components = pn_comp.page(pn_comp.num_pages)
s_components_page_buttons = util.generate_page_buttons("co_button_",
pn_comp.num_pages, "co_page", s_comp_page, button_range)
view = request.GET.get('view', '')
see_ci = v_user.has_perm('ctlweb.can_see_ci')
see_description = v_user.has_perm('ctlweb.can_see_description')
dict_response["user"] = v_user
dict_response["form"] = form
dict_response["interfaces"] = paged_interfaces
dict_response["search_components"] = s_components
dict_response["paged_components"] = s_paged_components
dict_response["interface_page_buttons"] = interfaces_page_buttons
dict_response["s_components_page_buttons"] = s_components_page_buttons
dict_response["post_data"] = request.POST.urlencode()
dict_response["view"] = view
dict_response["see_ci"] = see_ci
dict_response["see_description"]= see_description
if "search_query" in request.GET:
dict_response["searchquery"] = request.GET.get('search_query', None)
context = RequestContext(request, dict_response)
return render_to_response("components_new_page.html",
context_instance=context)
| null |
src/frontend/app/ctlweb/views/lists.py
|
lists.py
|
py
| 6,148 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.template.RequestContext",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ctlweb.models.Interfaces.objects.none",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "ctlweb.models.Interfaces.objects",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "ctlweb.models.Interfaces",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "ctlweb.models.Interfaces.objects.none",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "ctlweb.models.Interfaces.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "ctlweb.models.Interfaces",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "ctlweb.models.Components.objects.none",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "ctlweb.models.Components.objects",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "ctlweb.models.Components",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "ctlweb.models.Interfaces.objects.all",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "ctlweb.models.Interfaces.objects",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "ctlweb.models.Interfaces",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "ctlweb.models.Components.objects.filter",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "ctlweb.models.Components.objects",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "ctlweb.models.Components",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGINATION_PAGE_RANGE_INTERFACES",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGINATION_PAGE_RANGE_COMPONENTS",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGINATION_BUTTON_RANGE",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "util.generate_page_buttons",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "util.generate_page_buttons",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "util.generate_page_buttons",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 154,
"usage_type": "call"
}
] |
319436366
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from pages.main_page import MainPage
from argparser_dir.argparser_file import args
class MyTestCase(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
self.parser = args
def test_login_with_invalid_credentials(self):
"""
        Try to log in with wrong/non-existing credentials --> verify the error message appears
"""
page_login = MainPage(self.driver).get_login_page()
page_login.open(page_login.url)
page_login.login(*page_login.invalid_user_credentials)
self.assertEqual(page_login.get_error_message, page_login.expected_error_message)
def test_registration_procedure(self):
"""
        Verify that a notification about an existing user appears
"""
register_page = MainPage(self.driver).get_register_page()
register_page.open(register_page.url)
register_page.register(first_name=self.parser.firstname,
last_name=self.parser.lastname,
login_name=self.parser.login,
password=self.parser.password,
number=self.parser.num)
self.assertEqual(register_page.phone_confirmation, register_page.expected_error_message)
def test_search_an_image_and_verify(self):
"""
        Verify the search indeed finds the value the user provided.
        In this specific scenario - search for a dog, and verify the first image (from the left side)
is --> Beagle Dog Wallpaper.
"""
image_page = MainPage(self.driver).get_images_page()
image_page.open(image_page.url)
image_page.search_an_image('dog')
element = image_page.find_element((By.CLASS_NAME, 'serp-item__thumb'))
self.assertEqual(element.get_attribute('alt'), 'Beagle Dog Wallpaper.')
def test_translate_word_from_english_to_spanish(self):
"""
Specific case to test:
change output language to Spanish
search the word hello
"""
trans_page = MainPage(self.driver).get_translate_page()
trans_page.open(trans_page.url)
trans_page.change_translation_language()
trans_page.translate_word('hello')
def test_search_for_video(self):
"""
Search for a video - e.g. UFC 01 and verify output.
"""
video_page = MainPage(self.driver).get_video_page()
video_page.open(video_page.url)
video_page.search_for_video('ufc 01')
VID_LOCATION = (By.XPATH, '/html/body/div[3]/div[1]/div[1]/div/div[2]/div/div/div[3]/div[2]/a')
ret = video_page.find_element(VID_LOCATION).text
self.assertEqual(ret, 'Epic Descent: Col de Joux Plane')
def tearDown(self):
self.driver.close()
if __name__ == '__main__':
unittest.main()
| null |
tests/test_verification_file.py
|
test_verification_file.py
|
py
| 2,920 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "argparser_dir.argparser_file.args",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pages.main_page.MainPage",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pages.main_page.MainPage",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pages.main_page.MainPage",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pages.main_page.MainPage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pages.main_page.MainPage",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 76,
"usage_type": "call"
}
] |
443249128
|
#!/usr/bin/python
# coding:utf-8
# https://gist.github.com/albertomontesg/d8b21a179c1e6cca0480ebdf292c34d2
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv3D,Convolution3D, MaxPooling3D, ZeroPadding3D
from keras.optimizers import SGD
from keras import initializers
from keras.initializers import he_normal
import matplotlib.pyplot as plt
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.constraints import maxnorm
import stcnnCBModel
#dataPath = '/net/xserve0/users/kojima/jikken/data/'
dataPath1 = '/media/aoki/559a0841-86d1-4a13-bad8-8cab1f416f10/kojima/data/shuffle1228/'
import keras.backend as K
def weighted_categorical_crossentropy(weights):
"""
A weighted version of keras.objectives.categorical_crossentropy
Variables:
weights: numpy array of shape (C,) where C is the number of classes
Usage:
weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
loss = weighted_categorical_crossentropy(weights)
model.compile(loss=loss,optimizer='adam')
"""
weights = K.variable(weights)
print("---" + str(weights))
def loss(y_true, y_pred):
# scale predictions so that the class probas of each sample sum to 1
#y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
# clip to prevent NaN's and Inf's
y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
# calc
loss = y_true * K.log(y_pred) * weights
loss = -K.sum(loss, -1)
#loss = K.print_tensor(loss)
#print("-------loss--------"+ str(loss))
return loss
return loss
def generate_arrays_from_file():
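    # endless generator for Keras fit_generator: cycles over the four data shards
    # (id0..id3), reshuffles on every pass, and yields ([grayscale, depth], labels) batches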
i=0
itr=0
ad_epoch = 0
init_seed = 23
while 1:
X1_train = np.load(dataPath1+'/GS/X_train10_id' + str(i) + '.npy')
X2_train = np.load(dataPath1+'/depth/X_trainDP10_id' + str(i) + '.npy')
y_train = np.load(dataPath1+'/GS/y_train10_id' + str(i) + '.npy')
d,h,w=16,90,120
batch_size = 10
data_size = y_train.shape[0]
X1_train = X1_train.reshape(data_size,d,h,w,1)
X2_train = X2_train.reshape(data_size,d,h,w,1)
# print("---------------------")
# print("id" + str(i) )
# print(X1_train.shape)
# print(X2_train.shape)
# print(y_train.shape)
# print("---------------------")
# class_weight
# w = [0]*9
# for lb in range(0,9):
# num = len(np.where(y_train==lb)[0])
# if num != 0:
# w[lb] = float( len(y_train) / float( num ) )
# w[lb] = w[lb]
# else:
# w[lb] = 0
# print( str(lb) + "class:" + str(num) + ": " + str(w[lb] ) )
# prepreprocessing
nb_classes = 9
X1_train = X1_train.astype('float32')
X1_train /= 255.0
X2_train = X2_train.astype('float32')
X2_train /= 255.0
y_train = np_utils.to_categorical(y_train, nb_classes)
np.random.seed(init_seed + ad_epoch )
# data shuffle
ad_epoch = ad_epoch + 1
# data shuffle
shuffle_indices = np.random.permutation(np.arange(data_size))
X1_train = X1_train[shuffle_indices]
X2_train = X2_train[shuffle_indices]
y_train = y_train[shuffle_indices]
#Y_train = np_utils.to_categorical(y_train, 9)
#for (x,y) in zip(X_train,y_train):
for batch_num in range(int(data_size/batch_size) ):
# create numpy arrays of input data
# and labels, from each line in the file
startId = batch_num * batch_size
endId = min( (batch_num+1)*batch_size,data_size )
# print(str(endId) + "," + str(data_size) )
# if endId > data_size:
# print("------------------- !!!! ---------------------------")
# break
# print(startId)
# print(endId)
itr=itr+1
x1 = X1_train[startId:endId]
x2 = X2_train[startId:endId]
y = y_train[startId:endId]
yield ([x1,x2], y)
if i == 3:
# print("")
# print("---------------------")
# print("itration:"+str(itr))
# print("---------------------")
# print("")
itr=0
i=0
else:
i=i+1
del X1_train,X2_train
del y_train
#!/usr/bin/python
# coding:utf-8
import tensorflow as tf
#sess = tf.Session()
print("cnn_kscgr")
#from __future__ import print_function
import numpy as np
#import cv2
import os
import sys
import time
#np.random.seed(1337) # for reproducibility
dataPath = '/media/aoki/559a0841-86d1-4a13-bad8-8cab1f416f10/kojima/data/samp10_2/'
print("------------------ load test_data ----------------------------")
# X_train = np.load('data_kscgr_15.npz')['X_train']
# y_train = np.load('data_kscgr_15.npz')['y_train']
# X_test = np.load('data_kscgr_15.npz')['X_test']
# y_test = np.load('data_kscgr_15.npz')['y_test']
# X1_test = np.load(dataPath + '/GS/X_train10_id4.npy')
# X2_test = np.load(dataPath + '/depth/X_trainDP10_id4.npy')
# y_test = np.load(dataPath+'/GS/y_train10_id4.npy')
# d,h,w=16,90,120
# data_size_test = y_test.shape[0]
# X1_test = X1_test.reshape(data_size_test,d,h,w,1)
# X2_test = X2_test.reshape(data_size_test,d,h,w,1)
# # X1_test = X1_test[::2]
# # X2_test = X2_test[::2]
# # y_test = y_test[::2]
# shuffle_id = np.random.permutation(np.arange(data_size_test))
# X1_test = X1_test[shuffle_id]
# X2_test = X2_test[shuffle_id]
# y_test = y_test[shuffle_id]
# print(X1_test.shape)
# print(X2_test.shape)
# print(y_test.shape)
# X1_test = X1_test.astype('float32')
# X1_test /= 255.0
# X2_test = X2_test.astype('float32')
# X2_test /= 255.0
# print(X1_test.shape[0], 'test samples')
from keras.utils import np_utils
nb_classes = 9 # 10→9
#Y_test = np_utils.to_categorical(y_test, nb_classes)
# ------------------------- create model -----------------------------------------------#
from keras.optimizers import SGD,Adam,Adagrad,Nadam
#optimizer = SGD(decay=1e-6, momentum=0.9, nesterov=True)
#optimizer = Adagrad(lr=0.1)
#optimizer = Adam(lr=0.01)
#opt=Adam(lr=0.001)
#opt=Adagrad(lr=0.01)
#opt = SGD(decay=1e-6, momentum=0.9, nesterov=True)
opt = Nadam()
# patience = 100
# early_stop = EarlyStopping('val_loss', patience=patience)
model = stcnnCBModel.getModel_tmp(summary=True)
#model.compile(loss='categorical_crossentropy',
# optimizer= opt ,
# metrics=['accuracy'])
# class_weight
# w = [0]*9
# for i in range(0,9):
# num = len(np.where(y_train==i)[0])
# w[i] = len(y_train)/ float( num )
# w[i] = w[i]
# print( w[i] )
#------------------------------ train and test -------------------------------------------#
# # handle class imbalance -> also set class_weight as w_i = N_all / N_i
# class_weight = {0 : 0.3,1 : 3.3, 2 : 0.9, 3 : 0.4,4 : 2.5,
# 5 : 1.0,6 : 1.4, 7 : 3.3, 8 : 1.7}
# #class_weight = {0 : 0.03,1 : 0.33, 2 : 0.09, 3 : 0.04,4 : 0.25, 5 : 0.1,6 : 0.14, 7 : 0.33, 8 : 0.17}
# # class_weight = {0:w[0],1:w[1],2:w[2],3:w[3],4:w[4],
# # 5:w[5],6:w[6],7:w[7],8:w[8]}
# class_weight = {0 : 3,1 : 33, 2 : 9, 3 : 4,4 : 25,
# 5 : 10,6 : 14, 7 : 33, 8 : 17}
# w = 2
# class_weight = {0 : 3,1 : 33*2, 2 : 9*2, 3 : 4*2,4 : 25*2,
# 5 : 10*2,6 : 14*2/2, 7 : 33*2, 8 : 17*2*2}
# class_weight = {0 : 100./31,1 : 100./3, 2 : 100./11, 3 : 100./25,4 : 100./4,
# 5 : 100./10,6 : 100./7, 7 : 100./3, 8 : 100./6}
# class_weight = {0 : 100./31,1 : 100./3, 2 : 100./11, 3 : 100./25,4 : 100./4,
# 5 : 100./10,6 : 100./7, 7 : 100./3, 8 : 100./6}
class_weight = np.array([100./31,100./3,100./11,100./25,100./4,100./10,100./7,100./3,100./6])
class_weight = class_weight/9
#class_weight = class_weight/np.sum(class_weight)
#class_weight = class_weight * 9
#val_class_weight = np.array([class_weight[i] for i in y_test])
#class_weight = {0 : 0.2,1 : 1, 2 : 1, 3 : 0.2,4 : 1,
# 5 : 1,6 : 1, 7 : 1, 8 : 1}
model.compile(loss=weighted_categorical_crossentropy(class_weight),
optimizer= opt ,
metrics=['accuracy'])
print(class_weight)
import keras.callbacks
f_log = './log'
f_model = './model'
tb_cb = keras.callbacks.TensorBoard(log_dir=f_log, histogram_freq=1)
cp_cb = keras.callbacks.ModelCheckpoint(filepath = os.path.join(f_model,'cnn_model{epoch:02d}-loss{loss:.2f}-acc{acc:.2f}-vloss{val_loss:.2f}-vacc{val_acc:.2f}.hdf5'), monitor='val_loss', verbose=0, save_best_only=True, mode='auto')
cbks = [cp_cb]
# history = model.fit(X_train, Y_train, batch_size=batch_size,class_weight= class_weight, epochs=epochs, callbacks = callbacks,
# verbose=1, validation_data=(X_test, Y_test))
#train_data_size = 5717
epochs=30
steps_per_epochs=1715
steps_per_epochs=1388
#histori = model.fit_generator(generate_arrays_from_file(),class_weight= class_weight, epochs=epochs,samples_per_epoch=1000,callbacks=callbacks,validation_data=(X_test, Y_test))
# hist = model.fit_generator(generate_arrays_from_file(),class_weight=class_weight,
# epochs=epochs,steps_per_epoch=steps_per_epochs,callbacks=cbks,verbose=1,
# validation_data=([X1_test,X2_test] , Y_test))
#hist = model.fit_generator(generate_arrays_from_file(),class_weight=class_weight,
# epochs=epochs,steps_per_epoch=steps_per_epochs,callbacks=cbks,verbose=1,
# validation_data=([X1_test,X2_test] , Y_test))
# from sklearn.metrics import confusion_matrix, classification_report
# score = model.evaluate([X1_test,X2_test], Y_test, verbose=1)
# predict = model.predict_classes([X1_test,X2_test])
# #print('Test score:', score[0])
# print('Test accuracy:', score[1])
# print ( confusion_matrix(y_test, predict) )
# # print ( classification_report(y_test, predict) )
# acc = hist.history["acc"]
# val_acc = hist.history['val_acc']
# loss = hist.history["loss"]
# val_loss = hist.history["val_loss"]
# np.savez('res.npz', acc=acc, val_acc=val_acc, loss=loss, val_loss=val_loss )
print('save the architecture of a model')
json_string = model.to_json()
open(os.path.join(f_model,'cnn_model.json'), 'w').write(json_string)
yaml_string = model.to_yaml()
open(os.path.join(f_model,'cnn_model.yaml'), 'w').write(yaml_string)
print('save weights')
model.save_weights(os.path.join(f_model,'cnn_model_weights.hdf5'))
| null |
hi_src/ST-CNN_ver2/savemodel.py
|
savemodel.py
|
py
| 10,680 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "keras.backend.variable",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "keras.backend.clip",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "keras.backend.epsilon",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.backend.log",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "keras.backend.sum",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "keras.optimizers.Nadam",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "stcnnCBModel.getModel_tmp",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "keras.models.callbacks.TensorBoard",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "keras.models.callbacks",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "keras.models",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "keras.models.callbacks.ModelCheckpoint",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "keras.models.callbacks",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "keras.models",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 314,
"usage_type": "attribute"
}
] |
400081428
|
from django.shortcuts import render, redirect
from chat.models import Room, Message
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.forms import UserCreationForm
from .forms import CreateUserForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
# Create your views here.
def registerPage(request):
form = CreateUserForm()
if request.method == "POST":
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request,'Account was Created for '+user)
return redirect('login')
context = {'form':form}
return render(request, 'register.html',context)
def loginPage(request):
if request.method=='POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request,user)
return redirect('home')
else:
messages.info(request,'Username OR Password is Incorrect')
context = {}
return render(request, 'login.html',context)
def logoutUser(request):
logout(request)
return redirect('login')
@login_required(login_url='login')
def homePage(request):
context = {}
context['data'] = Room.objects.all()
return render(request,'home.html',context)
def room(request, room):
username = request.GET.get('username')
room_details = Room.objects.get(name=room)
return render(request,'room.html',{
'username':username,
'room':room,
'room_details':room_details
})
def checkview(request):
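    # join the requested room if it exists; otherwise create it first, then redirect into it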
room = request.POST['room_name']
print("room------->",room)
username = request.POST['username']
print("username------>",username)
if Room.objects.filter(name=room).exists():
return redirect('/'+room+'/?username='+username)
else:
new_room = Room.objects.create(name=room)
        new_room.save()
return redirect('/'+room+'/?username='+username)
def send(request):
message = request.POST['message']
username = request.POST['username']
room_id = request.POST['room_id']
new_message = Message.objects.create(value=message,user=username,room=room_id)
    new_message.save()
return HttpResponse('Message sent successfully')
def getMessages(request, room):
room_details = Room.objects.get(name=room)
messages = Message.objects.filter(room=room_details.id)
return JsonResponse({'messages':list(messages.values())})
def leaveRoom(request):
return render(request,'home.html')
| null |
chat/views.py
|
views.py
|
py
| 2,781 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "forms.CreateUserForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "forms.CreateUserForm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects.all",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Room",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Room",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects.filter",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Room",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects.create",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Room",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "chat.models.Message.objects.create",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "chat.models.Message.objects",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Message",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects.get",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "chat.models.Room.objects",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Room",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "chat.models.Message.objects.filter",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "chat.models.Message.objects",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Message",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.values",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 92,
"usage_type": "call"
}
] |
88871896
|
# -*- coding: utf-8 -*-
# @File : SimplePostPythonModule.py
# @Date : 2019/1/12
# @Desc :
import base64
import json
from Lib.ModuleAPI import *
class PostModule(PostMSFPythonWithParamsModule):
NAME = "内网Netbios&SMB扫描"
DESC = "通过 NBNS 协议获取 NetBIOS Name.\n" \
"通过 139(默认)或者 445 探测系统相关信息.\n"
MODULETYPE = TAG2CH.Discovery
PLATFORM = ["Windows", "Linux"] # 平台
PERMISSIONS = ["User", "Administrator", "SYSTEM", "Root"] # 所需权限
ATTCK = ["T1046"] # ATTCK向量
README = ["https://www.yuque.com/vipersec/module/wgghxf"]
REFERENCES = ["https://github.com/iiilin/inbtscan"]
AUTHOR = "Viper"
OPTIONS = register_options([
        OptionStr(name='ipstr', name_tag="IP address", required=True, desc="List of IP addresses to scan (10.10.10.10,10.10.11-13,10.10.11.1/24)"),
        OptionEnum(name='port',
                   name_tag="Port",
                   desc="Port 139 supports netbios+smb scanning; port 445 supports smb scanning",
                   required=True,
                   default=139,
                   enum_list=[
                       {'name': "139", 'value': 139},
                       {'name': "445", 'value': 445},
                   ]),
        OptionInt(name='connect_time_out', name_tag="Connection timeout (ms)",
                  desc="Timeout of each network connection during the scan; tune it to the host's intranet conditions (usually below 500 ms)",
                  default=100),
        OptionInt(name='timeout', name_tag="Module timeout (s)", desc="Timeout for the module run", required=True, default=600),
        # OptionInt(name='max_threads', name_tag="Scan threads", desc="Number of scan threads (max 20)", default=10),
    ])
def __init__(self, sessionid, ipaddress, custom_param):
super().__init__(sessionid, ipaddress, custom_param)
self.set_script("inbt.py") # 设置目标机执行的脚本文件
def check(self):
"""执行前的检查函数"""
        # session check
self.session = Session(self._sessionid)
if self.session.is_alive is not True:
return False, "当前session不可用"
        # parameter checks
ipstr = self.param('ipstr')
timeout = self.param('timeout')
connect_time_out = self.param('connect_time_out')
# max_threads = self.param('max_threads')
try:
iplist = self.str_to_ips(ipstr)
if len(iplist) > 510:
return False, "扫描IP范围过大(超过510),请缩小范围"
elif len(iplist) < 0:
return False, "输入的IP地址格式有误,未识别到有效IP地址,请重新输入"
self.set_script_param('ipstr', ipstr)
except Exception as E:
return False, "输入的IP格式有误,请重新输入"
if self.param('port') not in [139, 445]:
self.set_script_param('port', 139)
else:
self.set_script_param('port', self.param("port"))
        if timeout <= 0 or timeout > 3600:
            return False, "The module timeout is invalid (maximum 3600), please re-enter"
        if connect_time_out <= 0 or connect_time_out > 3000:
            return False, "The connection timeout is invalid (maximum 3000), please re-enter"
        # if max_threads <= 0 or max_threads > 20:
        #     return False, "The number of scan threads is invalid (maximum 20), please re-enter"
self.set_script_param('time_out', connect_time_out / 1000)
# self.set_script_param('max_threads', max_threads)
self.set_script_timeout(timeout)
# return False, None
return True, None
def callback(self, status, message, data):
if status:
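            # the target-side script returns its results as base64-encoded JSON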
try:
result = base64.b64decode(bytes(data, encoding="utf8")).decode('ascii')
portservice_list = json.loads(result)
except Exception as E:
self.log_error("脚本输出解析失败")
self.log_error(E)
self.log_error(data)
return
            if len(portservice_list) == 0:
                self.log_info("The script finished, but no valid data was scanned; the target network may be unreachable. Check the host's netstat information and retry")
                self.log_info("If the network connection is confirmed to be fine but the scan still returns nothing, use the 'reset python plugin' function in the Meterpreter command line, then scan again")
                return
            self.log_info("Scan results")
for portservice in portservice_list:
                # output section
                ipaddress = portservice.get("ipaddress")
                # register the discovered host
                result = self.add_host(ipaddress, source=self.host_ipaddress, linktype="scan",
                                       data={"method": "netbios"})
                portservice.pop("ipaddress")  # drop the ipaddress entry
HostInfo.update_info(ipaddress, portservice)
group = portservice.get("group")
unique = portservice.get("unique")
self.log_raw(f"{ipaddress} {group}/{unique}")
os_version = portservice.get("os_version")
major_version = portservice.get("major_version")
minor_version = portservice.get("minor_version")
build_number = portservice.get("bulid_number")
self.log_raw(os_version)
self.log_raw(f"Build Number: {build_number}")
self.log_raw(f"Major Version: {major_version}")
self.log_raw(f"Minor Version: {minor_version}")
ntlm_current_revision = portservice.get("ntlm_current_revision")
self.log_raw(f"Ntlm Current Revision: {ntlm_current_revision}")
name_list = portservice.get("name_list")
self.log_raw("\nNames:")
if isinstance(name_list, list):
for name in name_list:
self.log_raw(" ".join(name))
netbios_item = portservice.get("netbios_item")
self.log_raw("\nNetbios Item:")
if isinstance(netbios_item, list):
for netbios in netbios_item:
for key in netbios:
self.log_raw(f"{key}: {netbios[key]}")
self.log_raw("-----------------------------------------------\n\n")
else:
self.log_error("模块执行失败")
self.log_error(message)
| null |
MODULES/Discovery_NetworkServiceScanning_NbtScanByPython.py
|
Discovery_NetworkServiceScanning_NbtScanByPython.py
|
py
| 6,565 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "base64.b64decode",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 96,
"usage_type": "call"
}
] |
152914191
|
import csv
import random
from pathlib import Path
from typing import *
import cv2
import imutils
import numpy as np
import tqdm
SEED = 0xCAFFE
IOU_THRESHOLD = 0.3
py_rng = random.Random(SEED)
np_rng = np.random.RandomState(SEED)
MAX_TRIES = 50
MINIMAL_WIDTH = 5
MINIMAL_HEIGHT = 5
WIDTH_RANGE = (MINIMAL_WIDTH, 40)
HEIGHT_RANGE = (MINIMAL_HEIGHT, 40)
def load_annotation(p: Path) -> np.ndarray:
with open(p.as_posix(), 'r') as f:
reader = csv.reader(f)
all_boxes = []
for line in reader:
line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in line]
x1, y1, x2, y2, x3, y3, x4, y4 = list(map(int, line[:8]))
left = min(x1, x2, x3, x4)
right = max(x1, x2, x3, x4)
top = min(y1, y2, y3, y4)
bottom = max(y1, y2, y3, y4)
all_boxes.append([left, top, right, bottom])
return np.asarray(all_boxes, dtype=np.float32)
def pre_process_image_and_annotation(img: np.ndarray, annotation: np.ndarray,
max_side_len: int) -> Tuple[np.ndarray, np.ndarray, float, float]:
o_h, o_w = img.shape[:2]
if o_h > o_w:
img = imutils.resize(img, height=max_side_len)
else:
img = imutils.resize(img, width=max_side_len)
n_h, n_w = img.shape[:2]
h_ratio = n_h / o_h
w_ratio = n_w / o_w
annotation = annotation.astype(np.float32)
annotation[:, 0] *= w_ratio
annotation[:, 2] *= w_ratio
annotation[:, 1] *= h_ratio
annotation[:, 3] *= h_ratio
return img, annotation.astype(np.int32), h_ratio, w_ratio
def create_probability_map(image: np.ndarray, window_size: int) -> np.ndarray:
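    # slide a window across the image and use the local pixel variance as a
    # normalized sampling prior for placing candidate boxes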
output_h = image.shape[0] - window_size
output_w = image.shape[1] - window_size
output_map = np.zeros((output_h, output_w), dtype=np.float32)
for y in range(0, image.shape[0] - window_size, 1):
for x in range(0, image.shape[1] - window_size, 1):
sample = image[y:y + window_size, x:x + window_size]
output_map[y, x] = np.var(sample)
output_map = (output_map - output_map.min()) / (output_map.max() - output_map.min())
output_map = np.pad(output_map, [[window_size // 2, window_size // 2], [window_size // 2, window_size // 2]],
mode="constant", constant_values=0)
return output_map
def sample_box_parameters_from_image(image: np.ndarray, annotation: np.ndarray, coordinates: np.ndarray,
probability_map: np.ndarray,
num_samples: int) -> np.ndarray:
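    # draw candidate box centres from the variance-based probability map and keep only boxes that do not overlap annotated text (IoU filter)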
result = []
h, w = image.shape[:2]
f_proba_map = np.reshape(probability_map, (-1,))
f_proba_map = normalize_probabilities(f_proba_map)
f_coordinates = np.reshape(coordinates, (-1, 2))
indices = np.arange(0, len(f_proba_map))
selected_indices = np_rng.choice(indices, size=(MAX_TRIES,), replace=False, p=f_proba_map)
widths = np_rng.uniform(WIDTH_RANGE[0], WIDTH_RANGE[1], size=(MAX_TRIES,))
heights = np_rng.uniform(HEIGHT_RANGE[0], HEIGHT_RANGE[1], size=(MAX_TRIES,))
for i, index in enumerate(selected_indices):
x, y = f_coordinates[index]
width = widths[i]
height = heights[i]
x1 = x - width // 2
x2 = x + width // 2
y1 = y - height // 2
y2 = y + height // 2
x1 = max(0, x1)
x2 = min(w - 1, x2)
y1 = max(0, y1)
y2 = min(h - 1, y2)
box = np.asarray([x1, y1, x2, y2])
if x2 - x1 >= MINIMAL_WIDTH and y2 - y1 >= MINIMAL_HEIGHT and validate_box_with_annotations(box, annotation):
result.append(box)
if len(result) >= num_samples:
break
return np.asarray(result, dtype=np.int32)
def normalize_probabilities(probabilities: np.ndarray) -> np.ndarray:
return probabilities / probabilities.sum()
def validate_box_with_annotations(box: np.ndarray, annotations: np.ndarray) -> bool:
for ann in annotations:
iou = get_iou(box, ann)
if iou > IOU_THRESHOLD:
return False
return True
def get_coordinates(image: np.ndarray) -> np.ndarray:
h, w = image.shape[:2]
xs, ys = np.meshgrid(np.arange(0, w), np.arange(0, h))
coords = np.stack([xs, ys], axis=-1)
return coords
def get_iou(box1: np.ndarray, box2: np.ndarray) -> float:
    x11, y11, x21, y21 = box1
    x12, y12, x22, y22 = box2
    # overlap extents; a non-positive value means the boxes do not intersect
    inter_w = min(x21, x22) - max(x11, x12)
    inter_h = min(y21, y22) - max(y11, y12)
    if inter_w <= 0 or inter_h <= 0:
        return 0
    intersection = inter_w * inter_h
    union = (y21 - y11) * (x21 - x11) + (y22 - y12) * (x22 - x12) - intersection
    return intersection / (union + 1e-8)
def extract_boxes(image: np.ndarray, box_parameters: np.ndarray, h_ratio: float, w_ratio: float) -> List[np.ndarray]:
result = []
for x1, y1, x2, y2 in box_parameters:
frame = image[y1:y2, x1:x2]
h_frame, w_frame = frame.shape[:2]
h_frame /= h_ratio
w_frame /= w_ratio
frame = cv2.resize(frame, (int(w_frame), int(h_frame)))
result.append(frame)
return result
def generate_samples(
input_folder: str,
output_folder: str,
window_size: int,
default_size: int,
num_samples: int,
samples_per_photo: int):
input_folder = Path(input_folder)
output_folder = Path(output_folder)
if output_folder.exists():
raise ValueError("Given folder already exists")
output_folder.mkdir()
image_files = list(input_folder.rglob("*.jpg"))
counter = 0
probability_map_cache = {}
coordinates_map_cache = {}
with tqdm.tqdm(total=num_samples) as pbar:
while counter < num_samples:
image_file: Path = py_rng.choice(image_files)
image = cv2.imread(image_file.as_posix())
boxes = load_annotation(image_file.with_suffix(".txt"))
image, boxes, h_ratio, w_ratio = pre_process_image_and_annotation(image, boxes, default_size)
if image_file in probability_map_cache:
probability_map = probability_map_cache[image_file]
else:
probability_map = create_probability_map(image, window_size)
probability_map_cache[image_file] = probability_map
img_h, img_w = image.shape[:2]
if (img_h, img_w) in coordinates_map_cache:
coordinates = coordinates_map_cache[(img_h, img_w)]
else:
coordinates = get_coordinates(image)
coordinates_map_cache[(img_h, img_w)] = coordinates
box_parameters = sample_box_parameters_from_image(image, boxes, coordinates, probability_map,
samples_per_photo)
extracted_boxes = extract_boxes(image, box_parameters, h_ratio, w_ratio)
for i, box in enumerate(extracted_boxes):
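                # debug preview of each extracted crop; plt.show() blocks until the window is closed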
import matplotlib.pyplot as plt
plt.imshow(box)
plt.show()
cv2.imwrite((output_folder / "img_{}.jpg".format(i + counter)).as_posix(), box)
counter += len(extracted_boxes)
pbar.update(len(extracted_boxes))
if __name__ == '__main__':
import argparse
argument_parser = argparse.ArgumentParser(
description="Script for generating data for classifier for discriminating windows with no class ")
argument_parser.add_argument("--input_folder",
help="Folder path for jpgs and annotations files prepared for detection")
argument_parser.add_argument("--output_folder", help="Ex. 'no_class' for samples with no class")
argument_parser.add_argument("--window_size", help="Window size for calculating variance", type=int)
argument_parser.add_argument("--default_size",
help="Standard resize for img so all objects that have "
"to have same size will have the same size across images",
type=int)
argument_parser.add_argument("--num_samples", help="Number of samples to generate", type=int)
argument_parser.add_argument("--samples_per_photo", help="Number of samples to extract from single photo", type=int)
args = argument_parser.parse_args()
generate_samples(
args.input_folder,
args.output_folder,
args.window_size,
args.default_size,
args.num_samples,
args.samples_per_photo
)
| null |
src/dataset/create_no_sign_class_samples.py
|
create_no_sign_class_samples.py
|
py
| 8,470 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.Random",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "imutils.resize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "imutils.resize",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "numpy.int32",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "numpy.var",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "numpy.reshape",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "numpy.meshgrid",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "cv2.imwrite",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 195,
"usage_type": "call"
}
] |
647634643
|
## 30 days classification
import os
import glob
from tqdm import tqdm
import pandas as pd
from sklearn.utils import resample
from sklearn.utils import shuffle
def clean_data(path):
print("process start..")
df_admi_table=pd.read_csv(f'{path}/AdmissionsCorePopulatedTable.txt',
names=['PatientID','AdmissionID','AdmissionStartDate','AdmissionEndDate'], skiprows=1, sep='\t')
df_admi_dia=pd.read_csv(f'{path}/AdmissionsDiagnosesCorePopulatedTable.txt',
names=['PatientID','AdmissionID','PrimaryDiagnosisCode','PrimaryDiagnosisDescription'], skiprows=1, sep='\t')
df_pop_table=pd.read_csv(f'{path}/PatientCorePopulatedTable.txt',
names=['PatientID','PatientGender','PatientDateOfBirth','PatientRace','PatientMaritalStatus','PatientLanguage','PatientPopulationPercentageBelowPoverty'], skiprows=1, sep='\t')
path_csv="data/process/csv_dir"
os.makedirs(path_csv, exist_ok = True)
pid=pd.unique(df_admi_table['PatientID']).tolist()
raw=pd.DataFrame({"PatientID":[],'AdmissionID':[],'AdmissionStartDate':[],'AdmissionEndDate':[],'new':[]})
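    # pair every admission with the start date of the patient's *next* admission
    # ('new') so the days until readmission can be computed later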
for i in tqdm(pid):
_df=df_admi_table[df_admi_table['PatientID']==i]
_df=_df.sort_values(by = ['AdmissionID'])
if len(_df)<=1:
pass
else:
p=_df['AdmissionStartDate'].tolist()
p.append(p.pop(0))
_df['new']=p
_df[:-1].to_csv(f"{path_csv}/{i}.csv",index=False)
ar=[]
file=glob.glob(f"{path_csv}/*.csv")
for f in file:
ar.append(pd.read_csv(f))
df=pd.concat(ar)
df['AdmissionStartDate']= pd.to_datetime(df['AdmissionStartDate'])
df['AdmissionEndDate']= pd.to_datetime(df['AdmissionEndDate'])
df['new']= pd.to_datetime(df['new'])
#date
difference=(df['new'] - df['AdmissionEndDate'])
days=[]
for x in difference:
days.append(int(str(x).split(' ')[0]))
df['days']=days
df_=pd.merge(df, df_admi_dia, on=["PatientID", "AdmissionID"])
fps=pd.merge(df_pop_table, df_, on=["PatientID"])
fps.to_csv(f"data/process/final_{path.split('/')[-1]}.csv",index=False)
print('preprocessing done')
def lower_case(df):
for col in df.columns:
df[col]=df[col].astype(str)
df[col]=df[col].map(lambda x: x.lower())
return df
def preprocessing(df,days,feature):
df["days"]=df["days"].astype(int)
df['Target']=df['days']<=days
df['Target'].replace({False: 0, True: 1}, inplace=True)
#age
df['PatientDateOfBirth']= pd.to_datetime(df['PatientDateOfBirth'])
df['AdmissionEndDate']= pd.to_datetime(df['AdmissionEndDate'])
from dateutil.relativedelta import relativedelta
df['Age'] = [relativedelta(a, b).years for a, b in zip( df['AdmissionEndDate'],df['PatientDateOfBirth'])]
df.dropna(inplace=True)
df = shuffle(df)
return df[feature]
def upsample(train_df,size=1000):
# Separate majority and minority classes
df_majority = train_df[train_df.Target==False]
df_minority = train_df[train_df.Target==True]
# Upsample minority class
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=size, # to match majority class
random_state=123) # reproducible results
# Combine majority class with upsampled minority class
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
train_false = shuffle(train_df[train_df['Target']==False])
final_df=pd.concat([df_upsampled[df_upsampled['Target']==True],train_false[:size]])
train = shuffle(final_df)
return train
| null |
utility/.ipynb_checkpoints/preprocessing2-checkpoint.py
|
preprocessing2-checkpoint.py
|
py
| 3,798 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.unique",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.resample",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 97,
"usage_type": "call"
}
] |
490801650
|
import logging
from logging.handlers import TimedRotatingFileHandler
import os
from app.flask_app import ROOT
def get_logger(name):
""" Ф-я создания логирования
name - название файла лога (для каждого handlera свое)
Выход: созданный логгер
"""
formatter = logging.Formatter(
fmt='%(asctime)s [%(levelname)-8s] [%(filename)s, line:%(lineno)d]: %(message)s',
datefmt='%d.%m.%Y %H:%M:%S')
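    # rotate the log file every Monday ('W0') and keep 13 weekly backups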
handler = TimedRotatingFileHandler(os.path.join(ROOT, 'logs', name + '.log'),
when='W0',
backupCount=13)
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)  # ensure INFO records reach the handler regardless of the root logger's level
    logger.addHandler(handler)
return logger
| null |
tasks/utils/logger.py
|
logger.py
|
py
| 877 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.Formatter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.handlers.TimedRotatingFileHandler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.flask_app.ROOT",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
}
] |
623226672
|
import pytest
@pytest.mark.models
@pytest.mark.unit
@pytest.mark.incremental
class TestPageView:
model = None
data = {
'session_id': 'asjdkasdhjghj',
'url': 'any.com'
}
def test_create(self, database):
from geru.models.pageviews import PageView
page = PageView(**self.data)
database.add(page)
database.flush()
assert page.id is not None
assert page.date is not None
assert database.query(PageView).count() == 1
def test_to_dict(self, database):
from geru.models.pageviews import PageView
page = database.query(PageView).first()
d = page.to_dict()
for k in self.data.keys():
assert d[k] == self.data[k]
d = page.to_dict(exclude=['session_id'])
assert 'session_id' not in d
| null |
etc/geru_code/tests/models/test_pageviews.py
|
test_pageviews.py
|
py
| 830 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "geru.models.pageviews.PageView",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "geru.models.pageviews.PageView",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "geru.models.pageviews.PageView",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
}
] |
85690461
|
import sqlite3 # open-source sql built into python
# make a connection to a db
conn = sqlite3.connect('my_db') # connect to this db, or create it if it doesn't exist and then connect to it
# create a cursor to access members of the db
curs = conn.cursor() # curs is a db access object
# some SQL statements to read data
# we will have a zoo table containing animals with name, count, cost, exhibit
#stmt_read = "SELECT * FROM zoo"
stmt_read = '''
SELECT exhibit, animal, count, cost
FROM zoo
'''
#WHERE exhibit like '%jungle%'
#ORDER BY exhibit, count
#'''
# execute the statement
curs.execute(stmt_read)
rows = curs.fetchall()
print(rows) # a list of tuples
for anim in rows:
print('There are {1} {0}s in the {2} exhibit. It costs €{3:.2f} to maintain each {0}.'.format(anim[1], anim[2], anim[0], anim[3]))
# tidy up
conn.close()
| null |
pyadvanced/some_db/208db_read.py
|
208db_read.py
|
py
| 871 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 4,
"usage_type": "call"
}
] |
280588641
|
# -*- encoding: UTF-8 -*-
#!/usr/bin/python3.4
import sys
import subprocess
import datetime
def plot(lst):
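    # write the values to a uniquely numbered scratch file and hand it to the external Java plotter (ploter.jar)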
i = 0
trying = 100
base_name = 'plot_pipe'
#print(lst)
while (i < trying):
try:
file_name = base_name+str(i)
            f = open(file_name, 'x')  # 'x' raises FileExistsError if the name is taken, triggering the retry below
for x in lst:
f.write(str(x)+' ')
f.flush()
f.close()
args = ['java', '-jar', 'ploter.jar', file_name]
subprocess.call(args)
break
except FileExistsError:
i=i+1
if (i == trying):
        print('Failed to plot the graphic.')
def plotm(lst1, lst2, lst3, mod):
i = 0
trying = 100
base_name = 'plot_pipe'+datetime.datetime.now().strftime('%d%m%Y%H%M%S')
file_name = base_name+str(i)
f = open(file_name, 'w')
for x in lst1:
f.write(str(x[0])+' '+str(x[1])+' ')
f.write('\n')
for x in lst2:
f.write(str(x[0])+' '+str(x[1])+' ')
f.write('\n')
for x in lst3:
f.write(str(x[0])+' '+str(x[1])+' ')
f.write('\n')
f.flush()
f.close()
args = ['java', '-jar', 'ploter.jar', file_name, str(mod)]
subprocess.call(args)
| null |
Cauchy_problem/pipe_ploter.py
|
pipe_ploter.py
|
py
| 1,000 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "subprocess.call",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "subprocess.call",
"line_number": 47,
"usage_type": "call"
}
] |
277091834
|
#!/usr/bin/env python
# coding: utf-8
# # Test for simulated EEG
#
# Authors : Guillaume Dumas
#
# Date : 2020-07-09
from pathlib import Path
from copy import copy
from collections import OrderedDict
import numpy as np
import scipy
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import mne
from scipy.integrate import solve_ivp
from hypyp import utils, analyses
from mne.channels import find_ch_connectivity
def generate_virtual_epoch(epochs: mne.Epochs, W: np.ndarray, frequency_mean: float = 10, frequency_std: float = 0.2,
noise_phase_level: float = 0.005, noise_amplitude_level: float = 0.1) -> mne.Epochs:
"""
Generate epochs with simulated data using Kuramoto oscillators.
Arguments:
        epochs: mne.Epochs
            Epochs object to get the epoch info structure
        W: np.ndarray
            Coupling matrix between the oscillators
        frequency_mean: float
            Mean of the normal distribution for oscillator frequencies
        frequency_std: float
            Standard deviation of the normal distribution for oscillator frequencies
noise_phase_level: float
Amount of noise at the phase level
noise_amplitude_level: float
Amount of noise at the amplitude level
Returns:
mne.Epochs
new epoch with simulated data
"""
n_epo, n_chan, n_samp = epochs.get_data().shape
sfreq = epochs.info['sfreq']
N = int(n_chan / 2)
Nt = n_samp * n_epo
tmax = n_samp / sfreq * n_epo # s
tv = np.linspace(0., tmax, Nt)
freq = frequency_mean + frequency_std * np.random.randn(n_chan)
omega = 2. * np.pi * freq
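    # Kuramoto model: every channel is a phase oscillator coupled through W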
def fp(t, p):
p = np.atleast_2d(p)
coupling = np.squeeze((np.sin(p) * np.matmul(W, np.cos(p).T).T) - (np.cos(p) * np.matmul(W, np.sin(p).T).T))
dotp = omega - coupling + noise_phase_level * np.random.randn(n_chan) / n_samp
return dotp
p0 = 2 * np.pi * np.block([np.zeros(N) + np.random.rand(N) + 0.5, np.zeros(N) + np.random.rand(N) + 0.5]) # initialization
ans = solve_ivp(fun=fp, t_span=(tv[0], tv[-1]), y0=p0, t_eval=tv)
phi = ans['y'].T % (2 * np.pi)
eeg = np.sin(phi) + noise_amplitude_level * np.random.randn(*phi.shape)
simulation = epochs.copy()
simulation._data = np.transpose(np.reshape(eeg.T, [n_chan, n_epo, n_samp]), (1, 0, 2))
return simulation
def virtual_dyad(epochs,W, frequency_mean=10., frequency_std=0.2, noise_phase_level=0.005,
noise_amplitude_level=0.1):
n_epo, n_chan, n_samp = epochs.get_data().shape
    sfreq = epochs.info['sfreq']
    N = int(n_chan / 2)  # half of the channels belong to each simulated subject
    Nt = n_samp * n_epo
tmax = n_samp / sfreq * n_epo # s
tv = np.linspace(0., tmax, Nt)
freq = frequency_mean + frequency_std * np.random.randn(n_chan)
omega = 2. * np.pi * freq
def fp(p, t):
p = np.atleast_2d(p)
coupling = np.squeeze((np.sin(p) * np.matmul(W, np.cos(p).T).T) - (np.cos(p) * np.matmul(W, np.sin(p).T).T))
dotp = omega - coupling + noise_phase_level * np.random.randn(n_chan) / n_samp
return dotp
p0 = 2 * np.pi * np.block([np.zeros(N), np.zeros(N) + np.random.rand(N) + 0.5])
phi = odeint(fp, p0, tv) % (2 * np.pi)
eeg = np.sin(phi) + noise_amplitude_level * np.random.randn(*phi.shape)
    simulation = epochs.copy()
simulation._data = np.transpose(np.reshape(eeg.T, [n_chan, n_epo, n_samp]), (1, 0, 2))
return simulation
# Load data
montage = mne.channels.read_custom_montage('../syncpipeline/FINS_data/enobio32.locs')
info = mne.create_info(ch_names=montage.ch_names, sfreq=500, ch_types='eeg')
epo1 = mne.EpochsArray(data=np.empty((36, 32, 501)), info=info)
epo1.set_montage(montage)
epo2 = epo1.copy()
mne.epochs.equalize_epoch_counts([epo1, epo2])
sampling_rate = epo1.info['sfreq'] #Hz
# concatenate two datasets
epo_real = utils.merge(epoch_S1=epo1, epoch_S2=epo2)
# setting up parameters
n_chan = len(epo_real.ch_names)
# get channel locations
con, _ = find_ch_connectivity(epo1.info, 'eeg')
con = con.toarray()
N = int(n_chan/2)
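# block coupling matrix: intra-brain coupling only (A11, A22); the inter-brain blocks (A12, A21) are zero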
A11 = 1 * np.ones((N, N))
A12 = 0 * np.ones((N, N))
A21 = 0 * np.ones((N, N))
A22 = 1 * np.ones((N, N))
# A11 = con
# A22 = con
W = np.block([[A11, A12], [A21, A22]])
W = 0.2 * W
# simulation params
frequency_mean = 10.
frequency_std = 0.2
noise_phase_level = 0.005
noise_amplitude_level = 0.1
# check simulated set
sim = generate_virtual_epoch(epochs=epo_real, frequency_mean=frequency_mean, frequency_std=frequency_std,
noise_phase_level=noise_phase_level, noise_amplitude_level=noise_amplitude_level, W=W)
# sim.plot(scalings=5, n_epochs=3, n_channels=62)
# plt.show()
"""
Connectivity measures (PLV, ccorr, coh, imaginary coh, envelope corr, power corr)
"""
modes = ['plv', 'ccorr', 'coh', 'imaginary_coh', 'envelope_corr', 'pow_corr']
# generate 20 simulated datasets, and average the results
for mode in modes:
cons = []
for i in range(20):
sim = generate_virtual_epoch(epochs=epo_real, frequency_mean=frequency_mean, frequency_std=frequency_std,
noise_phase_level=noise_phase_level,
noise_amplitude_level=noise_amplitude_level, W=W)
freq_bands = {'Alpha-Low': [8, 12]}
connectivity = analyses.pair_connectivity(data=[sim.get_data()[:,0:32,:], sim.get_data()[:,32:,:]],
sampling_rate=sampling_rate, frequencies=freq_bands, mode=mode) # data.shape = (2, n_epochs, n_channels, n_times).
cons.append(connectivity[0])
plt.figure()
plt.imshow(np.nanmean(np.array(cons), axis=0))
plt.title(mode)
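# --- added sketch (not part of the original tutorial) ------------------------
# The Kuramoto order parameter r(t) = |mean_k exp(i*phi_k(t))| quantifies
# global synchrony (r ~ 1: phase-locked, r ~ 0: incoherent). The phases are
# internal to generate_virtual_epoch, so they are re-estimated here from the
# simulated signals with a Hilbert transform; illustration only.
from scipy.signal import hilbert

first_epoch = sim.get_data()[0]                       # (n_chan, n_samp)
inst_phase = np.angle(hilbert(first_epoch, axis=-1))  # instantaneous phases
order_param = np.abs(np.mean(np.exp(1j * inst_phase), axis=0))
plt.figure()
plt.plot(order_param)
plt.title('Kuramoto order parameter (first simulated epoch)')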
| null |
tutorial/simulations.py
|
simulations.py
|
py
| 5,601 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mne.Epochs",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "numpy.atleast_2d",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "numpy.block",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate.solve_ivp",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.transpose",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "mne.Epochs",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.atleast_2d",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "numpy.block",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate.odeint",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "numpy.transpose",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "mne.channels.read_custom_montage",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "mne.channels",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "mne.create_info",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "mne.EpochsArray",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "mne.epochs.equalize_epoch_counts",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "mne.epochs",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "hypyp.utils.merge",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "hypyp.utils",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "mne.channels.find_ch_connectivity",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.block",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "hypyp.analyses.pair_connectivity",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "hypyp.analyses",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "numpy.nanmean",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
}
] |
55174180
|
from abc import abstractmethod
import random
import sys
import threading
from time import time
import spidev  # the py-spidev package exposes SpiDev at module level
from util.logger import Logger
try:
import RPi.GPIO as GPIO
except RuntimeError:
print('Error importing RPi.GPIO!'
' This is probably because you need superuser privileges.'
' You can achieve this by using \'sudo\' to run your script')
sys.exit(1)
except ImportError:
print('Be sure to run this on a Raspberry Pi B+ ;)')
print('Continuing anyway ...')
log = Logger(__name__).setup()
class ADC(threading.Thread):
"""
Helper class used to read data from a single ADC chip in a separate thread.
"""
def __init__(self, channel, listener):
super(ADC, self).__init__()
self.daemon = True
self.channel = channel
self.listener = listener
# GPIO trigger interrupt pin
self.TRIGGER = 22
# Set up SPI interface
self.init_spi()
# Set up GPIO interrupt
self.init_gpio()
def init_spi(self):
log.info('Initialising SPI interface')
self.spi = spidev.SpiDev()
self.spi.open(0, self.channel)
def init_gpio(self):
log.info('Initialising GPIO trigger')
GPIO.setmode(GPIO.BOARD)
# Set up falling edge detection on the trigger pin
GPIO.setup(self.TRIGGER, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def run(self):
"""
Called when the thread is started.
"""
GPIO.add_event_detect(self.TRIGGER, GPIO.FALLING,
callback=self.on_trigger,
bouncetime=20)
def on_trigger(self, channel):
"""
Called when the trigger pin is pulled low. Immediately read data from
the sensor.
"""
start = time()
# Read/write some dummy data for now
response = self.spi.xfer2([random.randint(0, 35),
random.randint(0, 35)])
end = time()
log.debug("Read event data in " + str((end - start) * 1000) + "ms")
# Invoke the callback listener
self.listener.on_event(response)
def cleanup(self):
log.info('Cleaning up GPIO')
GPIO.cleanup()
class EventListener(object):
"""
Defines the methods that should be implemented by a class wishing to
receive event updates from the ADC sensor.
"""
@abstractmethod
def on_event(self, event):
"""
Called when an incoming detection event occurs.
"""
pass
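# --- added usage sketch (not part of the original module) --------------------
# Minimal wiring of the pieces above: implement EventListener, start the ADC
# reader thread, and clean up on exit. SPI channel 0 is an assumption.
if __name__ == '__main__':
    class PrintListener(EventListener):
        def on_event(self, event):
            print('ADC event: %s' % event)

    adc = ADC(channel=0, listener=PrintListener())
    adc.start()
    try:
        while True:
            pass  # events arrive on the GPIO callback thread
    except KeyboardInterrupt:
        adc.cleanup()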
| null |
sensors/adc/adc.py
|
adc.py
|
py
| 2,563 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.exit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "util.logger.Logger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "spidev.spidev.SpiDev",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "spidev.spidev",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setmode",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BOARD",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.IN",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.PUD_UP",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.add_event_detect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.FALLING",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.cleanup",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 88,
"usage_type": "name"
}
] |
288656633
|
import discord
from discord.ext import commands
class botinfo():
def __init__(self, client):
self.client = client
@commands.command()
async def botinfo(self, ctx, *, user: discord.Member=None):
embed=discord.Embed(title="informações do bot", description="essas são minha informações")
embed.set_thumbnail(url="https://cdn.discordapp.com/avatars/409821602012856321/8247dc94c2c4527db0058c898e82780d.webp?size=1024")
embed.add_field(name="Nome do bot", value="`-_-SkyBot-_-`", inline=True)
embed.add_field(name="Criado em", value="`12/12/2018`", inline=True)
embed.add_field(name="Meu id", value="`516991068521103372`", inline=True)
embed.add_field(name="meu dono", value="<@409821602012856321>", inline=True)
embed.add_field(name="Eu fui programado em", value="`Python 3.7.2` <:python:507486258184978443> ", inline=True)
embed.add_field(name="Meu ping é aproximadamente", value=f"`{int(self.client.latency * 1000)} ms`", inline=False)
embed.set_footer(text="Direitos reservador")
await ctx.send(embed=embed)
def setup(client):
print("[Comando botinfo] Carregado")
client.add_cog(botinfo(client))
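# --- added usage note (not part of the original cog) -------------------------
# With the pre-1.0 discord.py extension system used here, the bot entry point
# would load this cog via its module path; 'cogs.botinfo' is inferred from the
# file location and is an assumption:
#
#     client.load_extension('cogs.botinfo')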
| null |
cogs/botinfo.py
|
botinfo.py
|
py
| 1,225 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.Member",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 7,
"usage_type": "name"
}
] |
13981326
|
from copy import copy
from typing import Dict, Type, Any, Optional, TypeVar
from .common import Serializer, Parser, AbstractFactory
from .parsers import create_parser, get_lazy_parser
from .schema import Schema, merge_schema
from .serializers import create_serializer, get_lazy_serializer
from .type_detection import is_generic_concrete
from .naming import NameStyle
DEFAULT_SCHEMA = Schema[Any](
trim_trailing_underscore=True,
skip_internal=True,
only_mapped=False,
name_style=NameStyle.ignore,
)
class StackedFactory(AbstractFactory):
__slots__ = ("stack", "factory")
def __init__(self, factory):
self.stack = []
self.factory = factory
def parser(self, class_: Type):
if class_ in self.stack:
return get_lazy_parser(self.factory, class_)
self.stack.append(class_)
try:
return self.factory._parser_with_stack(class_, self)
finally:
self.stack.pop()
def serializer(self, class_: Type):
if class_ in self.stack:
return get_lazy_serializer(self.factory)
self.stack.append(class_)
try:
return self.factory._serializer_with_stack(class_, self)
finally:
self.stack.pop()
T = TypeVar("T")
class Factory(AbstractFactory):
__slots__ = ("default_schema", "debug_path", "schemas")
def __init__(self,
default_schema: Optional[Schema] = None,
schemas: Optional[Dict[Type, Schema]] = None,
debug_path: bool = False):
self.default_schema = merge_schema(default_schema, DEFAULT_SCHEMA)
self.debug_path = debug_path
self.schemas: Dict[Type, Schema] = {}
if schemas:
self.schemas.update({
type_: merge_schema(schema, self.default_schema)
for type_, schema in schemas.items()
})
def schema(self, class_: Type[T]) -> Schema[T]:
if is_generic_concrete(class_):
base_class = class_.__origin__ # type: ignore
else:
base_class = None
schema = self.schemas.get(class_)
if not schema:
if base_class:
schema = self.schemas.get(base_class)
schema = merge_schema(schema, self.default_schema)
self.schemas[class_] = schema
return schema
def parser(self, class_: Type[T]) -> Parser[T]:
return self._parser_with_stack(class_, StackedFactory(self))
def _parser_with_stack(self, class_: Type[T], stacked_factory: StackedFactory) -> Parser[T]:
schema = self.schema(class_)
if schema.get_parser is not None:
if schema.parser is not None:
raise TypeError("Schema can not have parser and get_parser at same time")
else:
new_schema = copy(schema)
new_schema.parser = schema.get_parser(class_, stacked_factory, self.debug_path)
new_schema.get_parser = None
self.schemas[class_] = new_schema
schema = new_schema
if not schema.parser:
schema.parser = create_parser(stacked_factory, schema, self.debug_path, class_)
return schema.parser
def serializer(self, class_: Type[T]) -> Serializer[T]:
return self._serializer_with_stack(class_, StackedFactory(self))
def _serializer_with_stack(self, class_: Type[T], stacked_factory: StackedFactory) -> Serializer[T]:
schema = self.schema(class_)
if schema.get_serializer is not None:
if schema.serializer is not None:
raise TypeError("Schema can not have serializer and get_serializer at same time")
else:
new_schema = copy(schema)
new_schema.serializer = schema.get_serializer(class_, stacked_factory, self.debug_path)
new_schema.get_serializer = None
self.schemas[class_] = new_schema
schema = new_schema
if not schema.serializer:
schema.serializer = create_serializer(stacked_factory, schema, self.debug_path, class_)
return schema.serializer
def load(self, data: Any, class_: Type[T]) -> T:
return self.parser(class_)(data)
def dump(self, data: T, class_: Type[T] = None) -> Any:
if class_ is None:
class_ = type(data)
return self.serializer(class_)(data)
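# --- added usage sketch (not part of the original module) --------------------
# Round-trip a dataclass through the Factory; `User` is a hypothetical example
# class, while load() and dump() are the public API defined above.
if __name__ == '__main__':
    from dataclasses import dataclass

    @dataclass
    class User:
        user_id: int
        first_name: str

    factory = Factory()
    user = factory.load({"user_id": 1, "first_name": "Bob"}, User)
    assert factory.dump(user) == {"user_id": 1, "first_name": "Bob"}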
| null |
dataclass_factory/factory.py
|
factory.py
|
py
| 4,441 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "schema.Schema",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "naming.NameStyle.ignore",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "naming.NameStyle",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "common.AbstractFactory",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "parsers.get_lazy_parser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "serializers.get_lazy_serializer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "common.AbstractFactory",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "schema.Schema",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "schema.Schema",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "schema.merge_schema",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "schema.Schema",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "schema.merge_schema",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "type_detection.is_generic_concrete",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "schema.merge_schema",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "schema.Schema",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "common.Parser",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "schema.get_parser",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "schema.parser",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "copy.copy",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "schema.get_parser",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "schema.parser",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "schema.parser",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "parsers.create_parser",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "schema.parser",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "common.Parser",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "common.Serializer",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "schema.get_serializer",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "schema.serializer",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "copy.copy",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "schema.get_serializer",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "schema.serializer",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "schema.serializer",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "serializers.create_serializer",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "schema.serializer",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "common.Serializer",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 122,
"usage_type": "name"
}
] |
278766536
|
import Orange
from orangecontrib.associate.fpgrowth import *
from scipy.sparse import lil_matrix
import sys
def inferRules(content):
    if content == 1:
        data_file = "cars_proc.tab"  # renamed from 'set' to avoid shadowing the builtin
        minsup = 0.2
        minconf = 0.7
    elif content == 0:
        data_file = "voting.tab"
        minsup = 0.5
        minconf = 0.7
    else:
        print("Wrong input! Adios")
        return
    data = Orange.data.Table(data_file)
X, mapping = OneHot.encode(data, include_class=True)
itemsets = dict(frequent_itemsets(X, minsup))
class_items = {item
for item, var, _ in OneHot.decode(mapping, data, mapping)
if var is data.domain.class_var}
rules = [(P, Q, supp, conf)
for P, Q, supp, conf in association_rules(itemsets, minconf)
if len(Q) == 1 and Q & class_items]
names = {item: '{}={}'.format(var.name, val)
for item, var, val in OneHot.decode(mapping, data, mapping)}
for ante, cons, supp, conf in rules:
print(', '.join(names[i] for i in ante), '-->',
names[next(iter(cons))], supp, conf)
return
if __name__ == "__main__":
choice = input("Key in 1 for Car Evaluation, 0 for Voting Dataset: ")
try:
selection = int(choice)
except:
print("Not a valid input. Try Again!")
sys.exit()
inferRules(selection)
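# --- added note (not part of the original script) ----------------------------
# For reference, the thresholds passed above have the usual definitions:
#   support(P -> Q)    = count(P and Q) / N          (itemset frequency)
#   confidence(P -> Q) = support(P and Q) / support(P)
# e.g. with minsup=0.5 on the voting set, an itemset must occur in at least
# half the records before any rule built from it is considered.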
| null |
HW1_p3.py
|
HW1_p3.py
|
py
| 1,368 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Orange.data.Table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "Orange.data",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 48,
"usage_type": "call"
}
] |
202061974
|
import telebot
from telebot import types
TOKEN = '1017092229:AAGfakUe7TMHtP8MwTDhS6hYXD8wHEmK9X0' # bot token OKDA
bot = telebot.TeleBot(TOKEN)
#memes
def nice(message):
noice = open('img/noice.jpg', 'rb')
bot.send_photo(message.chat.id, noice)
bot.send_message(message.chat.id, "Держи, ёмаё.")
print("[LOG] Noice meme sent to", message.chat.id, "(Chat ID)")
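# --- added usage sketch (not part of the original file) ----------------------
# nice() is not wired to any command yet; a minimal pyTelegramBotAPI handler
# registration would look like this (the command name is an assumption):
#
# @bot.message_handler(commands=['nice'])
# def handle_nice(message):
#     nice(message)
#
# bot.polling(none_stop=True)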
| null |
config.py
|
config.py
|
py
| 398 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "telebot.TeleBot",
"line_number": 7,
"usage_type": "call"
}
] |
260084595
|
#!/usr/bin/env python
import argparse
import logging
import platform
import os
import subprocess
import time
import yaml
import uuid
def parse_config(config_file) :
logging.info("Loading configuration from %s" % config_file)
config = open(config_file, 'r')
    return yaml.safe_load(config)  # safe_load: this config needs no arbitrary YAML tags
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='')
parser.add_argument("-c", action='store', dest='config',
help="Configuration file.")
parser.add_argument("-t", action='store', dest='test',
help="Don't submit the job to the batch.")
args = parser.parse_args()
# If a configuration file was not specified, warn the user and exit.
if not args.config :
parser.error('A configuration file needs to be specified.')
# Configure the logger
logging.basicConfig(format='[ production ][ %(levelname)s ]: %(message)s',
level=logging.DEBUG)
# Parse the configuration file.
logging.info('Parsing configuration located at %s' % args.config.strip())
config = parse_config(args.config)
jobs = 0
lhe_files = []
if 'Jobs' in config:
jobs = int(config['Jobs'])
logging.info('Submitting %s jobs.' % jobs)
elif 'LHE' in config:
file_list = config['LHE'].strip()
lhe_files = open(file_list, 'r')
else:
logging.info('Need to specify the number of jobs or a list of LHE files.')
exit()
# Check what distribution we are running on to determine what setup script
# to source. If it's a Centos7 machine, exit. Centos7 is currently no
# supported.
dist = platform.linux_distribution()[0]
if dist == "Red Hat Enterprise Linux Server":
env_script = '/nfs/slac/g/ldmx/software/setup.sh'
else:
logging.warning('CentOS7 is currently not supported')
exit()
#if 'EnvScript' in config: env_script = config['EnvScript'].strip()
#print '[ BSUB ] Environment script = %s' % env_script
# Setup all environmental variables.
command = ['bash', '-c', 'source %s && env' % (env_script)]
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
for line in proc.stdout:
(key, _, value) = line.partition('=')
os.environ[key] = value.strip()
proc.communicate()
# Detector that will be used to run.
detector = config['Detector'].strip()
logging.info('Using detector %s' % detector)
if 'Prefix' in config:
oprefix = config['Prefix'].strip()
else:
oprefix = ''
# Get the path where all files will be stored. If the path doesn't
# exist, create it.
odir = config['Output'].strip()
if not os.path.exists(odir):
logging.info('Output directory does not exist and will be created.')
os.makedirs(odir)
logging.info('All files will be save to %s' % odir)
macro = config['Macro'].strip()
logging.info('Using macro template %s' % macro)
rconfig = config['Config'].strip()
logging.info('Using config template %s' % rconfig)
run_script = '%s/run_prod.py' % os.getcwd()
batch_command = 'bsub -R "select[rhel60]" -q medium -W 2800'
if 'LHE' in config:
for lhe_path in lhe_files:
ofile = 'test'
command = 'python %s -d %s -p %s -o %s -m %s -c %s -l %s' % (run_script, detector, odir, ofile, macro, rconfig, lhe_path.strip())
if not args.test:
command = '%s %s' % (batch_command, command)
subprocess.Popen(command, shell=True).wait()
time.sleep(0.1)
elif jobs != 0:
for job in xrange(0, jobs):
ofile = "%s_%s" % (oprefix, str(uuid.uuid4())[:8])
#log_path = ofile + ".log"
#logging.info('Saving log to: %s' % log_path)
command = 'python %s -d %s -p %s -o %s -m %s -c %s' % (run_script, detector, odir, ofile, macro, rconfig)
if not args.test:
command = '%s %s' % (batch_command, command)
subprocess.Popen(command, shell=True).wait()
time.sleep(0.1)
if __name__ == "__main__" :
main()
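# --- added example config (not part of the original script) ------------------
# A minimal YAML file consistent with the keys parsed above; every value is
# illustrative only:
#
#   Jobs: 100                  # or instead: LHE: /path/to/lhe_file_list.txt
#   Detector: some-detector-v1
#   Prefix: prod
#   Output: /path/to/output
#   Macro: macros/run.mac
#   Config: configs/run.py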
| null |
production/bsub_prod.py
|
bsub_prod.py
|
py
| 4,216 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "platform.linux_distribution",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 125,
"usage_type": "call"
}
] |
603548270
|
import requests
import json
from urllib.parse import urljoin
import urllib3
class BaseClient(object):
def __init__(self, base_url, verify=False, ok_codes=(200, 201, 204), headers=None, auth=None):
self._base_url = base_url
self._verify = verify
self._ok_codes = ok_codes
self._headers = headers
self._auth = auth
self._session = requests.Session()
def _http_request(self, method, url_suffix, full_url=None, headers=None,
auth=None, json_data=None, params=None, data=None, files=None,
timeout=10, resp_type='json', ok_codes=None, **kwargs):
urllib3.disable_warnings()
try:
# Replace params if supplied
address = full_url if full_url else urljoin(self._base_url, url_suffix)
headers = headers if headers else self._headers
auth = auth if auth else self._auth
# Execute
res = self._session.request(
method,
address,
verify=self._verify,
params=params,
data=data,
json=json_data,
files=files,
headers=headers,
auth=auth,
timeout=timeout,
**kwargs
)
# Handle error responses gracefully
if not self._is_status_code_valid(res, ok_codes):
err_msg = 'Error in API call [{}] - {}' \
.format(res.status_code, res.reason)
try:
# Try to parse json error response
error_entry = res.json()
err_msg += '\n{}'.format(json.dumps(error_entry))
raise ConnectionError(err_msg)
except ValueError as exception:
                    raise ValueError(err_msg) from exception
resp_type = resp_type.lower()
try:
if resp_type == 'json':
return res.json()
if resp_type == 'text':
return res.text
if resp_type == 'content':
return res.content
return res
except ValueError as exception:
                raise ValueError('Failed to parse json object from response: {}'.format(res.content)) from exception
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
            raise ConnectionError(err_msg) from exception
except requests.exceptions.SSLError as exception:
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
            raise ConnectionError(err_msg) from exception
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = '\nError Type: {}\nError Number: [{}]\nMessage: {}\n' \
'Verify that the server URL parameter' \
' is correct and that you have access to the server from your host.' \
.format(err_type, exception.errno, exception.strerror)
            raise ConnectionError(err_msg) from exception
def _is_status_code_valid(self, response, ok_codes=None):
status_codes = ok_codes if ok_codes else self._ok_codes
if status_codes:
return response.status_code in status_codes
return response.ok
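# --- added usage sketch (not part of the original module) --------------------
# Minimal use of the client against a hypothetical endpoint; the base URL and
# route below are placeholders:
#
#     client = BaseClient('https://api.example.com/', verify=True,
#                         headers={'Accept': 'application/json'})
#     result = client._http_request('GET', 'v1/status')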
| null |
src/blueprints/common.py
|
common.py
|
py
| 3,783 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.Session",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib3.disable_warnings",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urljoin",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 71,
"usage_type": "attribute"
}
] |
116228920
|
#!/usr/bin/python
import os
import sys
import logging
import urllib.request
import json
import cv2
from concurrent.futures.thread import ThreadPoolExecutor
from progress.bar import Bar
from PIL import Image
import socket
# download function on multi-threads
def download(item_id, url, images_dir, bbox, crop):
if not os.path.exists(images_dir):
os.makedirs(images_dir)
try:
file_output = os.path.join(images_dir, str(item_id) + '.' + 'JPEG')
socket.setdefaulttimeout(15)
urllib.request.urlretrieve(url, file_output)
if (verify_image(file_output)):
image = cv2.imread(file_output)
image = image[bbox['top']:bbox['top'] + bbox['height'],
bbox['left']: bbox['left'] + bbox['width']] if (bbox is not None and crop) else image
cv2.imwrite(file_output, image)
else:
os.remove(file_output)
print('Remove '+file_output)
except:
# print("Unexpected error:", sys.exc_info()[0])
logging.error(sys.exc_info()[0])
# Function to verify image
def verify_image(image_file):
try:
img = Image.open(image_file)
img.verify()
except:
return False
return True
# Download images for each class
def read_class(class_name, max_num_samples, url_dict, images_dir, threads, is_retrieval, is_match=False):
if not is_retrieval:
domain = 'train_pairs_'
else:
domain = 'retrieval_' if not is_match else 'retrieval_with_match_'
file_loc = 'meta/json/' + domain + class_name + '.json'
with open(file_loc, 'r') as file_json:
meta_data = json.load(file_json)
images_list = []
    bar = Bar('Downloading ' + class_name.title(), max=(len(meta_data)
              if (max_num_samples is None) or (max_num_samples > len(meta_data)) else max_num_samples), suffix='%(percent)d%%')
output_dir = os.path.join(images_dir, 'street', class_name) if not is_retrieval else os.path.join(images_dir, 'shop', class_name)
for i, data in enumerate(meta_data):
if max_num_samples != None and i >= max_num_samples:
break
photo_id = int(data['photo'])
url = url_dict[photo_id]
bbox = data['bbox'] if not is_retrieval else None
images_list.append(
{'item_id': photo_id, 'url': url, 'images_dir': output_dir, 'bbox': bbox})
        if i % threads == 0:
            with ThreadPoolExecutor(max_workers=threads) as executor:
for x in images_list:
executor.submit(
download, x['item_id'], x['url'], x['images_dir'], x['bbox'], args.crop)
images_list = []
bar.next()
    # flush downloads still queued after the loop; the original only submitted
    # when i % threads == 0, silently dropping the remainder
    if images_list:
        with ThreadPoolExecutor(max_workers=threads) as executor:
            for x in images_list:
                executor.submit(
                    download, x['item_id'], x['url'], x['images_dir'], x['bbox'], args.crop)
    bar.finish()
print('Downloaded ' + str(len(next(os.walk(output_dir))
[2])) + ' images for class ' + class_name)
def main(args):
print('Start downloading images from Street2Shop dataset...')
if args.log is not None:
logging.basicConfig(filename=args.log,
format='%(message)s', level=logging.ERROR)
# Read file that contains the urls
f = open(args.urls, 'r')
url_list = f.read().split('\n')
url_dict = dict([(int(line.split(',')[0]), line.split(',')[1])
for line in url_list])
f.close()
# Create the retrieval meta json files for matched shop images when necessary
if args.match:
for class_name in args.classes:
out_file = 'meta/json/' + 'retrieval_with_match_' + class_name + '.json'
if os.path.isfile(out_file):
break
with open('meta/json/' + 'train_pairs_' + class_name + '.json', 'r') as f:
dicts = json.load(f)
ci_pids = set()
for x in dicts:
ci_pids.add(x['product'])
with open('meta/json/' + 'retrieval_' + class_name + '.json', 'r') as f:
dicts = json.load(f)
inner_join_dicts = []
for x in dicts:
if x['product'] in ci_pids:
inner_join_dicts.append(x)
with open(out_file, 'w') as json_file:
json.dump(inner_join_dicts, json_file)
print('Created the retrieval meta json files for the matching {:^10} images in the shop domain.'.format(class_name))
print()
if 'street' in args.domains:
for class_name in args.classes:
read_class(class_name, args.max_num_samples,
url_dict, args.images_dir, args.threads, False)
if 'shop' in args.domains:
for class_name in args.classes:
read_class(class_name, args.max_num_samples,
url_dict, args.images_dir, args.threads, True, args.match)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
all_classes = ['bags','belts','dresses','eyewear','footwear','hats','leggings','outerwear','pants','skirts','tops']
# Data handling parameters
parser.add_argument('--urls', dest='urls', type=str,
default=None, required=True, help='urls file')
parser.add_argument('--image_dir', dest='images_dir',
type=str, default='images', help='image directory')
parser.add_argument('--domains', nargs='+', dest='domains', type=str,
default='street', help='specific photo domains to download')
parser.add_argument('--match', dest='match',
action='store_true', help='download shop photos that have a matching street photo only')
parser.add_argument('--log', dest='log', type=str,
default=None, help='log errors')
parser.add_argument('--threads', dest='threads',
type=int, default=10, help='threads')
parser.add_argument('--classes', nargs='+', dest='classes', type=str,
default=all_classes, help='specific fashion classes to download')
parser.add_argument('--max_num_samples', dest='max_num_samples',
type=int, default=None, help='maximum number of samples')
parser.add_argument('--crop', dest='crop',
action='store_true', help='crop image based on given bounding box')
args = parser.parse_args()
main(args)
print('Finished')
exit(0)
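# --- added usage note (not part of the original script) ----------------------
# Example invocation built from the argparse options defined above (file and
# directory names are placeholders):
#
#   python download.py --urls photos.txt --image_dir images \
#       --domains street shop --classes dresses tops --threads 20 --crop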
| null |
download.py
|
download.py
|
py
| 6,593 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "socket.setdefaulttimeout",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "progress.bar.Bar",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures.thread.ThreadPoolExecutor",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 130,
"usage_type": "call"
}
] |
571327733
|
# -*- coding: utf-8 -*-
import numpy as np
import math
import matplotlib.pyplot as plt
def sigmoid(x):
'''
    :param x: scalar or numpy array
    :return: element-wise sigmoid of x
'''
    if not isinstance(x, np.ndarray):
return 1/(1+math.exp(-x))
return 1/(1+np.exp(-x))
# partial derivative of the sigmoid activation function
def sigDer(x):
'''
    :param x: scalar or numpy array
    :return: element-wise derivative of the sigmoid at x
'''
return sigmoid(x)*(1-sigmoid(x))
if __name__ == "__main__":
#N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
xs = np.array([[1.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])
ys = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
#
# # Randomly initialize weights
w1 = np.random.random((2,2))
w2 = np.random.random((2,2))
# Create random input and output data
# xs = np.random.random((N, D_in))
# ys = np.random.random((N, D_out))
#
# # Randomly initialize weights
# w1 = np.random.random((D_in, H))
# w2 = np.random.random((H, D_out))
print(xs)
print("------------")
print(ys)
learning_rate = 0.005
losses = []
#learning_rate = 0.05
for step in range(1000):
# for i in range(len(xs)):
        # hidden-layer pre-activation
        hin = xs.dot(w1)
        # apply the sigmoid to the hidden layer
        hout = sigmoid(hin)
        # output-layer pre-activation
        oin = hout.dot(w2)
        # apply the sigmoid to the output layer
        out = sigmoid(oin)
y_pred = out
loss = np.square(y_pred - ys).sum()
if step%50==0:
losses.append(loss)
print(step, loss)
        # Backprop to compute gradients of w1 and w2 with respect to loss.
        # Both layers use sigmoid activations, so chain through sigDer; the
        # original zeroed grad_h where hin < 0, which is the ReLU derivative
        # and does not match the forward pass above.
        grad_y_pred = 2.0 * (y_pred - ys)
        grad_oin = grad_y_pred * sigDer(oin)
        grad_w2 = hout.T.dot(grad_oin)
        grad_hout = grad_oin.dot(w2.T)
        grad_hin = grad_hout * sigDer(hin)
        grad_w1 = xs.T.dot(grad_hin)
# Update weights
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2
plt.plot(losses)
plt.show()
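    # --- added sketch (not part of the original script) ----------------------
    # Quick finite-difference check of the backprop chain: perturb one weight,
    # recompute the loss, and compare the slope against a freshly recomputed
    # analytic grad_w1 (the loop above already updated w1, so recompute both).
    def loss_fn(w1_, w2_):
        return np.square(sigmoid(sigmoid(xs.dot(w1_)).dot(w2_)) - ys).sum()

    eps = 1e-6
    w1_pert = w1.copy()
    w1_pert[0, 0] += eps
    numeric_grad = (loss_fn(w1_pert, w2) - loss_fn(w1, w2)) / eps
    print('numeric dL/dw1[0,0] ~', numeric_grad)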
| null |
code/torchBp.py
|
torchBp.py
|
py
| 2,028 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.ndarray",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.square",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
}
] |
199685258
|
#!/usr/bin/python
from collections import Counter
def main():
print("How many employees will be playing?: ")
try:
count = int(input())
init(count)
except ValueError:
print("Not a number, Pleas try again")
main()
def init(count):
    w, h = 8, count
master_list = []
powerball_list = []
counter = 0
players = [[0 for z in range(w)] for y in range(h)]
for x in range(0,count):
first_name = input("Enter your first name: ")
if first_name.isdigit():
print("not a valid string")
exit()
        last_name = input("Enter your last name: ")
        if last_name.isdigit():  # fixed: was re-checking first_name
            print("not a valid string")
            exit()
players[x][0] = first_name
players[x][1] = last_name
for e in range(2,7):
            number = int(input("Please select a unique number, 1- 69: "))
            # check against this player's earlier picks, not the whole grid
            if number not in players[x][2:e]:
                players[x][e] = number
                master_list.append(number)
            else:
                print("no duplicates please")
powerball = int(input("Please select a powerball number,1-26: "))
print("\n")
players[x][7] = powerball
powerball_list.append(powerball)
counter += 1
for b in range(0,count):
for c in range(0,7):
print(players[b][c],end= " ")
print(''.join("powerball: " + str(players[b][7])))
data = Counter(powerball_list)
result = data.most_common(1)[0][0]
data_1 = Counter(master_list)
result_1 = data_1.most_common(5)
res_list = [x[0] for x in result_1]
print(''.join("Powerball winning number:" + str(res_list) + "Powerball: " + str(result)))
if __name__ == '__main__':
    main()
| null |
greenphire.py
|
greenphire.py
|
py
| 1,762 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.Counter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 59,
"usage_type": "call"
}
] |
538983773
|
"""
code to build MEDS files
"""
from __future__ import print_function
import json
import copy
import numpy
from numpy import where, zeros
# external requirements
import fitsio
# esutil is only needed for the Maker, so we will let
# it slide if it is missing
try:
import esutil as eu
have_esutil=True
except:
have_esutil=False
from .util import \
make_wcs_positions, \
get_meds_output_struct, \
get_meds_input_struct, \
get_image_info_struct, \
radec_to_uv, \
MEDSCreationError
from .bounds import Bounds
from .defaults import default_config, default_values
SUPPORTED_CUTOUT_TYPES = ['image','weight','seg','bmask']
class MEDSMaker(dict):
"""
Write MEDS files. See the docs at https://github.com/esheldon/meds
for a description of the format
parameters
-----------
obj_data: numpy array with fields
Required fields are are 'id','box_size','ra','dec'. For
data types, see meds.util.get_meds_input_dtype
image_info: numpy array with fields
Information for each image. For the required data type, see
the meds.util.get_image_info_dtype() function.
config: dict, optional
Optional configuration parameters. The available options
are given XXX
meta_data: numpy array with fields, optional
Optional meta data to write. This is typically a length
one array, but can be anything in principle.
"""
def __init__(self,
obj_data,
image_info,
config=None,
meta_data=None):
self._load_config(config)
self._set_extra_config()
# make copies since we may alter some things
self.image_info = image_info.copy()
self._set_meta_data(meta_data)
self._set_obj_data(obj_data)
self._force_box_sizes_even()
def write(self, filename):
"""
build the meds layout and write images
"""
self._build_meds_layout()
self._write_data(filename)
def _write_data(self, filename):
"""
run through and write cutouts from each SE file
for each image
Matt's notes
1) figure out which objs overlap
2) grab cutouts
3) set weight maps properly (zero out bad pixels, areas off the chip)
4) set bitmasks
5) grab seg maps
6) write to proper spot in each 1d image on disk
"""
print("opening output MEDS file: '%s'" % filename)
with fitsio.FITS(filename,'rw',clobber=True) as fits:
self.fits=fits
self._write_object_data()
self._write_image_info()
self._write_metadata()
self._reserve_mosaic_images()
self._write_cutouts('image')
self._write_cutouts('weight')
self._write_cutouts('seg')
self._write_cutouts('bmask')
print('output is in:',filename)
def _write_object_data(self):
"""
write the object data
"""
print('writing object_data')
self.fits.write(self.obj_data,
extname=self['object_data_extname'])
def _write_image_info(self):
"""
write the object data
"""
print('writing image_info')
self.fits.write(self.image_info,
extname=self['image_info_extname'])
def _write_metadata(self):
"""
write the object data
"""
print('writing metadata')
if self.meta_data is not None:
self.fits.write(self.meta_data,
extname=self['metadata_extname'])
def _reserve_mosaic_images(self):
"""
reserve space on disk for each mosaic image
"""
fits=self.fits
dims=[self.total_pixels]
if 'fpack_pars' in self:
header=self['fpack_pars']
else:
header=None
for type in self['cutout_types']:
print(' reserving %s mosaic' % type)
extname=self['%s_cutout_extname' % type]
dtype=self['%s_dtype' % type]
# this reserves space for the images and header,
# but no data is written
fits.create_image_hdu(
img=None,
dtype=dtype,
dims=dims,
extname=extname,
header=header,
)
# now need to write the header
fits[extname].write_keys(header, clean=False)
def _write_cutouts(self, cutout_type):
"""
write the cutouts for the specified type
"""
print('writing %s cutouts' % cutout_type)
obj_data=self.obj_data
nfile=self.image_info.size
nobj=obj_data.size
cutout_hdu = self._get_cutout_hdu(cutout_type)
for file_id in xrange(nfile):
pkey = '%s_path' % cutout_type
impath = self.image_info[pkey][file_id].strip()
print(' %d/%d %s %s' % (file_id+1,nfile,cutout_type,impath))
im_data = self._read_image(file_id, cutout_type)
if im_data is None:
print(' no %s specified for file' % cutout_type)
continue
for iobj in xrange(nobj):
ncut=obj_data['ncutout'][iobj]
for icut in xrange(ncut):
if obj_data['file_id'][iobj, icut] == file_id:
self._write_cutout(
iobj,
icut,
cutout_hdu,
im_data,
cutout_type,
)
def _write_cutout(self,
iobj, icut,
cutout_hdu,
im_data,
cutout_type):
"""
extract a cutout and write it to the mosaic image
"""
dims = im_data.shape
d=self.obj_data
orow = d['orig_start_row'][iobj,icut]
ocol = d['orig_start_col'][iobj,icut]
bsize = d['box_size'][iobj]
start_row = d['start_row'][iobj,icut]
orow_box, row_box = self._get_clipped_boxes(dims[0],orow,bsize)
ocol_box, col_box = self._get_clipped_boxes(dims[1],ocol,bsize)
read_im = im_data[orow_box[0]:orow_box[1],
ocol_box[0]:ocol_box[1]]
subim = zeros( (bsize,bsize), dtype=read_im.dtype)
subim += default_values[cutout_type]
subim[row_box[0]:row_box[1],
col_box[0]:col_box[1]] = read_im
cutout_hdu.write(subim, start=start_row)
def _get_clipped_boxes(self, dim, start, bsize):
"""
get clipped boxes for slicing
If the box size goes outside the dimensions,
trim them back
parameters
----------
dim: int
Dimension of this axis
start: int
Starting position in the image for this axis
bsize: int
Size of box
returns
-------
obox, box
obox: [start,end]
Start and end slice ranges in the original image
box: [start,end]
Start and end slice ranges in the output image
"""
# slice range in the original image
obox = [start, start+bsize]
# slice range in the sub image into which we will copy
box = [0, bsize]
# rows
if obox[0] < 0:
obox[0] = 0
box[0] = 0 - start
im_max = dim
diff= im_max - obox[1]
if diff < 0:
obox[1] = im_max
box[1] = box[1] + diff
return obox, box
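    # Worked example (added note): with dim=100, start=-3, bsize=10 the cutout
    # hangs off the low edge; the clipping above yields obox=[0, 7] (read only
    # pixels that exist) and box=[3, 10] (place them at the matching offset in
    # the padded stamp), so the first 3 output pixels keep the default fill.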
def _get_cutout_hdu(self, cutout_type):
"""
get the cutout hdu object for the specified cutout type
"""
tkey = '%s_cutouts' % cutout_type
cutout_hdu = self.fits[tkey]
return cutout_hdu
def _read_image(self, file_id, cutout_type):
"""
read an image, performing manipulations for
some types
images are background subtracted if a background
file is specified. The image is zerod where the
bitmask is nonzero, if a bitmask file is specified.
Similarly, weights are zerod where the bitmask is set.
parameters
----------
file_id: int
The id into the image_info structure
cutout_type: string
'image','bkg','seg','bmask'
"""
im = self._read_one_image(file_id, cutout_type)
if cutout_type=='image':
bkg = self._read_one_image(file_id, 'bkg')
if bkg is not None:
im -= bkg
else:
print(' no background for image')
bmask = self._read_one_image(file_id, 'bmask')
if bmask is not None:
w=self._check_bad_bmask(bmask)
im[w] = 0.0
else:
print(' no bmask for image')
scale = self._get_scale(file_id)
im *= scale
elif cutout_type=='weight':
if 'min_weight' in self:
w=numpy.where(im < self['min_weight'])
if w[0].size > 0:
print(" setting",w[0].size,"weight values to zero")
im[w] = 0.0
bmask = self._read_one_image(file_id, 'bmask')
if bmask is not None:
w=self._check_bad_bmask(bmask)
im[w] = 0.0
else:
print(' no bmask for image')
scale = self._get_scale(file_id)
im *= (1.0/scale**2)
return im
def _check_bad_bmask(self, bmask):
"""
return indices with not-allowed bits set
"""
binv = self['bitmask_allowed_inv']
wbad = where( (bmask & binv) != 0)
if wbad[0].size != 0:
print(' found %d masked pixels' % wbad[0].size)
return wbad
def _read_one_image(self, file_id, cutout_type):
"""
read a single image, no manipulations done here
"""
info=self.image_info
pkey = '%s_path' % cutout_type
extkey = '%s_ext' % cutout_type
impath=info[pkey][file_id].strip()
ext = info[extkey][file_id]
if impath.lower() == 'none' or impath=='':
im=None
else:
if isinstance(ext, str):
ext = ext.strip()
with fitsio.FITS(impath) as fits:
im = fits[ext].read()
return im
def _get_scale(self, file_id):
"""
get the scale for the image if specified, else
return 1.0
"""
if 'scale' in self.image_info.dtype.names:
return self.image_info['scale'][file_id]
else:
return 1.0
def _build_meds_layout(self):
"""
build the object data, filling in the stub we read
note position offsets appear nowhere in this function
"""
# box sizes are even
half_box_size = self.obj_data['box_size']//2
obj_data=self.obj_data
nim = self.image_info.size
nobj = obj_data.size
for file_id in xrange(nim):
self._get_wcs(file_id)
impath=self.image_info['image_path'][file_id].strip()
position_offset=self.image_info['position_offset'][file_id]
print("file %4d of %4d: '%s'" % (file_id+1,nim,impath))
wcs = self._get_wcs(file_id)
# monkey patching in the position_offset into wcs
wcs.position_offset=position_offset
q = self._do_rough_sky_cut(wcs, obj_data['ra'], obj_data['dec'])
print(' first cut: %6d of %6d objects' % (len(q),nobj))
# this is the bottleneck
pos = self._do_sky2image(wcs,
obj_data['ra'][q],
obj_data['dec'][q])
# now test if in the actual image space. Bounds are created
# in the offset coords
bnds = self._get_image_bounds(wcs)
# for coadds add buffer if requested
if file_id == 0:
bnds.rowmin -= self['coadd_bounds_buffer_rowcol']
bnds.rowmax += self['coadd_bounds_buffer_rowcol']
bnds.colmin -= self['coadd_bounds_buffer_rowcol']
bnds.colmax += self['coadd_bounds_buffer_rowcol']
# do the test
in_bnds = bnds.contains_points(pos['zrow'], pos['zcol'])
q_rc, = numpy.where(in_bnds == True)
print(' second cut: %6d of %6d objects' % (len(q_rc),len(q)))
# for coadds remove the buffer
if file_id == 0:
bnds.rowmin += self['coadd_bounds_buffer_rowcol']
bnds.rowmax -= self['coadd_bounds_buffer_rowcol']
bnds.colmin += self['coadd_bounds_buffer_rowcol']
bnds.colmax -= self['coadd_bounds_buffer_rowcol']
# force into the image if requested
if file_id == 0 and self['force_into_coadd_bounds']:
# for debugging
print(" pre-forced obj row range (min, max - image row max): % e % e" \
% (numpy.min(pos['zrow'][q_rc]),numpy.max(pos['zrow'][q_rc]-bnds.rowmax)))
print(" pre-forced obj col range (min, max - image col max): % e % e" \
% (numpy.min(pos['zcol'][q_rc]),numpy.max(pos['zcol'][q_rc]-bnds.colmax)))
rn = numpy.clip(pos['zrow'][q_rc], bnds.rowmin, bnds.rowmax)
                cn = numpy.clip(pos['zcol'][q_rc], bnds.colmin, bnds.colmax)  # fixed: was clipped with the row bounds
num_forced = len(numpy.where((rn != pos['zrow'][q_rc]) | (cn != pos['zcol'][q_rc]))[0])
pos['zrow'][q_rc] = rn
pos['zcol'][q_rc] = cn
del rn
del cn
# for debugging
print(" post-forced obj row range (min, max - image row max): % e % e" \
% (numpy.min(pos['zrow'][q_rc]),numpy.max(pos['zrow'][q_rc]-bnds.rowmax)))
print(" post-forced obj col range (min, max - image col max): % e % e" \
% (numpy.min(pos['zcol'][q_rc]),numpy.max(pos['zcol'][q_rc]-bnds.colmax)))
print(" # of objects forced into coadd: %d" % num_forced)
# make sure stuff that is forced made it
in_in_bnds = bnds.contains_points(pos['zrow'][q_rc], pos['zcol'][q_rc])
if not numpy.all(in_in_bnds):
raise MEDSCreationError("Not all objects were found in first "
"image for MEDS making (which is the "
"coadd/detection image by convention) "
"after being forced into its bounds.")
# now make sure everything is there
if file_id == 0 and len(obj_data['ra']) != len(q_rc):
raise MEDSCreationError('Not all objects were found in first image for '
'MEDS making (which is the coadd/detection '
'image by convention).')
# compose them
q = q[q_rc]
# fill in the object_data structure
# note q_rc since pos was created using obj_data[q]
qrow = pos['zrow'][q_rc]
qcol = pos['zcol'][q_rc]
icut = obj_data['ncutout'][q]
obj_data['file_id'][q,icut] = file_id
obj_data['orig_row'][q,icut] = qrow
obj_data['orig_col'][q,icut] = qcol
#ostart_row = numpy.floor(qrow) - half_box_size[q]
#ostart_col = numpy.floor(qcol) - half_box_size[q]
# to be consistent with the C++ code
ostart_row = qrow.astype('i4') - half_box_size[q]
ostart_col = qcol.astype('i4') - half_box_size[q]
crow = qrow - ostart_row
ccol = qcol - ostart_col
obj_data['orig_start_row'][q,icut] = ostart_row
obj_data['orig_start_col'][q,icut] = ostart_col
obj_data['cutout_row'][q,icut] = crow
obj_data['cutout_col'][q,icut] = ccol
# do jacobian, in original, not-offset coords
# note q_rc since pos was created using obj_data[q]
jacob = wcs.get_jacobian(pos['wcs_row'][q_rc],
pos['wcs_col'][q_rc])
# jacob is a tuple of arrays
obj_data['dudcol'][q,icut] = jacob[0]
obj_data['dudrow'][q,icut] = jacob[1]
obj_data['dvdcol'][q,icut] = jacob[2]
obj_data['dvdrow'][q,icut] = jacob[3]
# increment
obj_data['ncutout'][q] += 1
self.obj_data = self._make_resized_data(obj_data)
self._set_start_rows_and_pixel_count()
def _set_start_rows_and_pixel_count(self):
"""
set the total number of pixels in each mosaic
"""
print('setting start rows and pixel count')
data=self.obj_data
nobj=data.size
npix = (data['ncutout']*data['box_size']**2).sum()
self.total_pixels = npix
npix=0
current_row = 0
for iobj in xrange(nobj):
ncut = data['ncutout'][iobj]
if ncut > 0:
bsize=data['box_size'][iobj]
npix_per_cutout = bsize*bsize
for icut in xrange(ncut):
data['start_row'][iobj,icut] = current_row
current_row += npix_per_cutout
npix += npix_per_cutout
if self.total_pixels != npix:
raise ValueError("total_pixels %d != "
"npix %d" % (self.total_pixels, npix))
print('total pixels:',self.total_pixels)
def _get_wcs(self, file_id):
"""
either load the wcs from the image_info, or from
the image header
"""
if 'wcs' in self.image_info.dtype.names:
wcs_string = self.image_info['wcs'][file_id]
wcs_data = json.loads(wcs_string)
else:
impath=self.image_info['image_path'][file_id].strip()
ext=self.image_info['image_ext'][file_id]
wcs_data = fitsio.read_header(impath, ext=ext)
wcs = eu.wcsutil.WCS(wcs_data)
return wcs
def _make_resized_data(self, odata):
"""
make a new struct with ncutout-sized-arrays based on
the actual maximum ncutout
"""
nmax = odata['file_id'].shape[1]
new_nmax = odata['ncutout'].max()
if new_nmax < 2:
new_nmax = 2
temp_obj_data = odata
nobj = temp_obj_data.size
new_data = get_meds_output_struct(nobj, new_nmax,
extra_fields=self['extra_fields'])
tmpst = get_meds_output_struct(1, new_nmax)
required_fields = tmpst.dtype.names
for name in new_data.dtype.names:
if name in temp_obj_data.dtype.names:
lshape = len(new_data[name].shape)
if lshape > 1 and name in required_fields:
new_data[name][:,:] = temp_obj_data[name][:,0:new_nmax]
else:
new_data[name][:] = temp_obj_data[name][:]
del temp_obj_data
return new_data
def _do_sky2image(self, wcs, ra, dec):
"""
get image positions for the input radec. returns a structure
with both wcs positions and zero offset positions
"""
col,row = wcs.sky2image(ra,dec)
positions = make_wcs_positions(row, col, wcs.position_offset)
return positions
def _do_rough_sky_cut(self, wcs, ra, dec):
"""
rough sky bounds cut
"""
sky_bnds,ra_ccd,dec_ccd = self._get_rough_sky_bounds(wcs)
u,v = radec_to_uv(ra,dec,ra_ccd,dec_ccd)
in_sky_bnds = sky_bnds.contains_points(u, v)
q, = numpy.where(in_sky_bnds == True)
return q
def _get_rough_sky_bounds(self, wcs, order=4):
"""
rough sky bounds for precut
wcs: is the wcs object that defines the transformation
order: order of grid to use in small direction to construct
bounding box in ra-dec
algorithm due to M. Jarvis w/ some changes from M. R. Becker
"""
ncol, nrow = wcs.get_naxis()
# set order so that pixels are square-ish
if ncol < nrow:
order_col = order
order_row = numpy.ceil(float(nrow)/float(ncol))
else:
order_row = order
order_col = numpy.ceil(float(ncol)/float(nrow))
# construct a grid - trying to be pythonic,
# but a double loop would be clearer
rows = numpy.arange(order_row+1)*(nrow-1.0)/order_row
cols = numpy.arange(order_col+1)*(ncol-1.0)/order_col
rows,cols = numpy.meshgrid(rows,cols)
rows = rows.ravel()
cols = cols.ravel()
# get ra,dec
pos = make_wcs_positions(rows, cols, wcs.position_offset, inverse=True)
ra,dec = wcs.image2sky(pos['wcs_col'], pos['wcs_row'])
# get ccd center
row_ccd = nrow/2.0
col_ccd = ncol/2.0
pos_ccd = make_wcs_positions(row_ccd, col_ccd, wcs.position_offset, inverse=True)
ra_ccd,dec_ccd = wcs.image2sky(pos_ccd['wcs_col'][0], pos_ccd['wcs_row'][0])
# get u,v - ccd is at 0,0 by def
u,v = radec_to_uv(ra,dec,ra_ccd,dec_ccd)
# build bounds with buffer and cos(dec) factors
vrad = numpy.deg2rad(v/3600.0) # arcsec to degrees
ufac = numpy.cos(vrad).min()
ubuff = self['bounds_buffer_uv']/ufac
vbuff = self['bounds_buffer_uv']
sky_bnds = Bounds(u.min() - ubuff,
u.max() + ubuff,
v.min() - vbuff,
v.max() + vbuff)
"""
OLD CODE - keeping here for now
# corners in default coord. system
rows = numpy.array([0.0, 0.0, nrow-1, nrow-1])
cols = numpy.array([0.0, ncol-1, 0.0, ncol-1])
pos=make_wcs_positions(rows, cols, wcs.position_offset, inverse=True)
ra,dec = wcs.image2sky(pos['wcs_col'], pos['wcs_row'])
decrad = numpy.deg2rad(dec)
rafac = numpy.cos(decrad).min()
rabuff = self['bounds_buffer_radec']/rafac
decbuff = self['bounds_buffer_radec']
sky_bnds = Bounds(ra.min() - rabuff,
ra.max() + rabuff,
dec.min() - decbuff,
dec.max() + decbuff)
"""
return sky_bnds,ra_ccd,dec_ccd
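# Usage sketch (illustrative): _do_rough_sky_cut above applies these bounds
# to candidate positions in the tangent plane, i.e.
#
#     u, v = radec_to_uv(ra, dec, ra_ccd, dec_ccd)
#     keep, = numpy.where(sky_bnds.contains_points(u, v))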
def _get_image_bounds(self, wcs):
"""
separate out so we can make changes to offset code without
altering calling function
"""
ncol, nrow = wcs.get_naxis()
rvals = numpy.array([1.0, nrow])
cvals = numpy.array([1.0, ncol])
pos = make_wcs_positions(rvals, cvals, wcs.position_offset)
bnds = Bounds(pos['zrow'][0],
pos['zrow'][1],
pos['zcol'][0],
pos['zcol'][1])
return bnds
def _force_box_sizes_even(self):
"""
box sizes are required to be even for MEDS files
The DES maker will only make even box sizes, but eventually
we will move this into the more general MEDSMaker that will
take the catalogs as input
"""
w, = numpy.where((self.obj_data['box_size'] % 2) != 0)
if w.size > 0:
self.obj_data['box_size'][w] += 1
def _set_cutout_types(self):
cutout_types = copy.deepcopy(self['cutout_types'])
# make sure 'image' is at the front
if 'image' in cutout_types:
cutout_types.remove('image')
cutout_types = ['image'] + cutout_types
bad_types=[]
for ctype in cutout_types:
if ctype not in SUPPORTED_CUTOUT_TYPES:
bad_types.append(ctype)
if len(bad_types) != 0:
st=', '.join(bad_types)
raise ValueError("unsupported cutout types: '%s'" % st)
self['cutout_types'] = cutout_types
print('writing cutouts for:',cutout_types)
def _set_meta_data(self, meta_data_in):
"""
add some fields to the input metadata for software versions
"""
version_fmt = 'S20'
numpy_version=numpy.__version__
esutil_version=eu.__version__
fitsio_version=fitsio.__version__
if meta_data_in is not None:
mnames=meta_data_in.dtype.names
mdt = copy.deepcopy( meta_data_in.dtype.descr )
nmeta=meta_data_in.size
else:
mnames=[]
mdt = []
nmeta=1
for n in ['numpy','esutil','fitsio']:
vname = '%s_version' % n
if vname not in mnames:
mdt += [(vname,version_fmt)]
meta_data = zeros(nmeta, dtype=mdt)
if meta_data_in is not None:
eu.numpy_util.copy_fields(meta_data_in, meta_data)
meta_data['numpy_version'] = numpy_version
meta_data['esutil_version'] = esutil_version
meta_data['fitsio_version'] = fitsio_version
self.meta_data=meta_data
def _set_obj_data(self, obj_data):
"""
copy the input data into a full object_data structure.
check for required fields
"""
self._check_required_obj_data_fields(obj_data)
self.obj_data = self._get_full_obj_data(obj_data)
def _check_required_obj_data_fields(self, obj_data):
"""
make sure the input structure has the required fields
"""
min_st = get_meds_input_struct(1)
missing=[]
for name in min_st.dtype.names:
if name not in obj_data.dtype.names:
missing.append(name)
if len(missing) > 0:
missing=', '.join(missing)
raise ValueError("missing fields from obj_data: '%s'" % missing)
def _get_full_obj_data(self, obj_data):
"""
make a full object structure, adding in any extra fields from the
input structure. Copy over the common fields
"""
nmax = self.image_info.size
if nmax < 2:
nmax = 2
self._set_extra_fields(obj_data, nmax)
nobj = obj_data.size
new_obj_data = \
get_meds_output_struct(nobj, nmax,
extra_fields=self['extra_fields'])
eu.numpy_util.copy_fields(obj_data, new_obj_data)
return new_obj_data
def _set_extra_fields(self, obj_data, nmax):
"""
determine the tags in obj_data but not in the required
fields for the output object_data
"""
full_st = get_meds_output_struct(1, nmax)
extra_fields = []
for dt in obj_data.dtype.descr:
name=dt[0]
if name not in full_st.dtype.names:
extra_fields.append(dt)
self['extra_fields'] = extra_fields
def _set_image_info(self, image_info):
"""
set the image info and check for required fields
"""
self._check_image_info(image_info)
self.image_info = image_info.copy()
def _check_image_info(self, image_info):
"""
check required fields
currently just make sure the structure is exactly
like that in get_image_info_dtype
"""
plen=2
dt = numpy.dtype(get_image_info_dtype(plen))
missing=[]
for name in dt.names:
if name not in image_info.dtype.names:
missing.append(name)
if len(missing) > 0:
s=', '.join(missing)
raise ValueError("missing image_info entries: '%s'" % s)
def _set_extra_config(self):
"""
set extra configuration parameters that are not user-controlled
"""
self['object_data_extname'] = 'object_data'
self['image_info_extname'] = 'image_info'
self['metadata_extname'] = 'metadata'
self['image_cutout_extname'] = 'image_cutouts'
self['weight_cutout_extname'] = 'weight_cutouts'
self['seg_cutout_extname'] = 'seg_cutouts'
self['bmask_cutout_extname'] = 'bmask_cutouts'
def _load_config(self, config):
"""
load the default config, then load the input config
"""
self.update(default_config)
if config is not None:
if not isinstance(config, dict):
raise RuntimeError("config must be a dict, "
"got %s" % type(config))
self.update(config)
self._set_cutout_types()
# need this to be unsigned
allowed = self['bitmask_allowed']
allowed = numpy.array([allowed],dtype='u4')
self['bitmask_allowed'] = allowed[0]
self['bitmask_allowed_inv'] = ~allowed[0]
| null |
meds/maker.py
|
maker.py
|
py
| 29,287 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "fitsio.FITS",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "defaults.default_values",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "fitsio.FITS",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "util.MEDSCreationError",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "util.MEDSCreationError",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "fitsio.read_header",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "esutil.wcsutil.WCS",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "esutil.wcsutil",
"line_number": 584,
"usage_type": "attribute"
},
{
"api_name": "util.get_meds_output_struct",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "util.get_meds_output_struct",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "util.make_wcs_positions",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "util.radec_to_uv",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 665,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "util.make_wcs_positions",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "util.make_wcs_positions",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "util.radec_to_uv",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 686,
"usage_type": "call"
},
{
"api_name": "bounds.Bounds",
"line_number": 690,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "util.make_wcs_positions",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "bounds.Bounds",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 745,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 752,
"usage_type": "call"
},
{
"api_name": "numpy.__version__",
"line_number": 776,
"usage_type": "attribute"
},
{
"api_name": "esutil.__version__",
"line_number": 777,
"usage_type": "attribute"
},
{
"api_name": "fitsio.__version__",
"line_number": 778,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "esutil.numpy_util.copy_fields",
"line_number": 797,
"usage_type": "call"
},
{
"api_name": "esutil.numpy_util",
"line_number": 797,
"usage_type": "attribute"
},
{
"api_name": "util.get_meds_input_struct",
"line_number": 818,
"usage_type": "call"
},
{
"api_name": "util.get_meds_output_struct",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "esutil.numpy_util.copy_fields",
"line_number": 844,
"usage_type": "call"
},
{
"api_name": "esutil.numpy_util",
"line_number": 844,
"usage_type": "attribute"
},
{
"api_name": "util.get_meds_output_struct",
"line_number": 853,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 881,
"usage_type": "call"
},
{
"api_name": "defaults.default_config",
"line_number": 909,
"usage_type": "argument"
},
{
"api_name": "numpy.array",
"line_number": 921,
"usage_type": "call"
}
] |
269871591
|
from django.core.management import BaseCommand, CommandError
from django.test import override_settings
from tickets.models import Order
from tickets.tests import factories
class Command(BaseCommand):
def handle(self, *args, **kwargs):
if Order.objects.count() > 0:
raise CommandError('The database is already populated')
users = [factories.create_user(name) for name in ['Alice', 'Beatrice', 'Benedict']]
with override_settings(EMAIL_BACKEND='django.core.mail.backends.dummy.EmailBackend'):
factories.create_ticket(users[0], num_days=5)
factories.create_pending_order_for_others(users[0])
factories.create_confirmed_order_for_self_and_others(users[1], rate='corporate')
factories.create_confirmed_order_for_self(users[2], num_days=5)
| null |
ironcage/management/commands/generatesampledata.py
|
generatesampledata.py
|
py
| 827 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.BaseCommand",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tickets.models.Order.objects.count",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tickets.models.Order.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tickets.models.Order",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.core.management.CommandError",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories.create_user",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.test.override_settings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories.create_ticket",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tickets.tests.factories.create_pending_order_for_others",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tickets.tests.factories.create_confirmed_order_for_self_and_others",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "tickets.tests.factories.create_confirmed_order_for_self",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tickets.tests.factories",
"line_number": 19,
"usage_type": "name"
}
] |
594921209
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Nov 6 2018
Authors:
Nathan De Lara <[email protected]>
Quality metrics for graph embeddings
"""
import numpy as np
from scipy import sparse
from scipy.stats import hmean
def dot_modularity(adjacency_matrix, embedding: np.ndarray, features=None, resolution=1., weights='degree',
return_all: bool=False):
"""
Difference of the weighted average dot product between embeddings of pairs of neighbors in the graph
(fit term) and pairs of nodes in the graph (diversity term).
:math:`Q = \\sum_{ij}(\\dfrac{A_{ij}}{w} - \\gamma \\dfrac{d_id_j}{w^2})x_i^Tx_j`
This metric is normalized to lie between -1 and 1.
If the embeddings are normalized, this reduces to the cosine modularity.
Parameters
----------
adjacency_matrix: sparse.csr_matrix or np.ndarray
the adjacency matrix of the graph
embedding: np.ndarray
the embedding to evaluate, embedding[i] must represent the embedding of node i
features: None or np.ndarray
For bipartite graphs, features should be the embedding of the second part
resolution: float
scaling for first-order approximation
weights: ``'degree'`` or ``'uniform'``
prior distribution on the nodes
return_all: bool, default = ``False``
whether to return (fit, diversity) or fit - diversity
Returns
-------
dot_modularity: a float or a tuple of floats.
"""
if type(adjacency_matrix) == sparse.csr_matrix:
adj_matrix = adjacency_matrix
elif sparse.isspmatrix(adjacency_matrix) or type(adjacency_matrix) == np.ndarray:
adj_matrix = sparse.csr_matrix(adjacency_matrix)
else:
raise TypeError(
"The argument must be a NumPy array or a SciPy Sparse matrix.")
n_nodes, m_nodes = adj_matrix.shape
total_weight: float = adj_matrix.data.sum()
if features is None:
if n_nodes != m_nodes:
raise ValueError('feature cannot be None for non-square adjacency matrices.')
else:
normalization = np.linalg.norm(embedding) ** 2 / np.sqrt(n_nodes * m_nodes)
features = embedding
else:
normalization = np.linalg.norm(embedding.dot(features.T)) / np.sqrt(n_nodes * m_nodes)
if weights == 'degree':
wou = adj_matrix.dot(np.ones(m_nodes)) / total_weight
win = adj_matrix.T.dot(np.ones(n_nodes)) / total_weight
elif weights == 'uniform':
wou = np.ones(n_nodes) / n_nodes
win = np.ones(m_nodes) / m_nodes
else:
raise ValueError('weights must be degree or uniform.')
fit = (np.multiply(embedding, adjacency_matrix.dot(features))).sum() / (total_weight * normalization)
diversity = (embedding.T.dot(wou)).dot(features.T.dot(win)) / normalization
if return_all:
return fit, resolution * diversity
else:
return fit - resolution * diversity
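# Minimal usage sketch (illustrative; the graph and embedding are arbitrary,
# not reference values):
#
#     adjacency = sparse.csr_matrix(np.array([[0., 1., 0.],
#                                             [1., 0., 1.],
#                                             [0., 1., 0.]]))
#     embedding = np.random.rand(3, 2)
#     print(dot_modularity(adjacency, embedding))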
def hscore(adjacency_matrix, embedding: np.ndarray, order='second', return_all: bool=False):
"""Harmonic mean of fit and diversity with respect to first or second order node similarity.
Parameters
----------
adjacency_matrix: sparse.csr_matrix or np.ndarray
the adjacency matrix of the graph
embedding: np.ndarray
the embedding to evaluate, embedding[i] must represent the embedding of node i
order: 'first' or 'second'
    The order of node similarity to use. First order corresponds to edge
    weights, while second order corresponds to the edge weights of the
    normalized cocitation graph.
return_all: bool, default = ``False``
whether to return (fit, diversity) or hmean(fit, diversity)
Returns
-------
hscore: a float or a tuple of floats.
"""
if type(adjacency_matrix) == sparse.csr_matrix:
adj_matrix = adjacency_matrix
elif sparse.isspmatrix(adjacency_matrix) or type(adjacency_matrix) == np.ndarray:
adj_matrix = sparse.csr_matrix(adjacency_matrix)
else:
raise TypeError(
"The argument must be a NumPy array or a SciPy Sparse matrix.")
n_nodes, m_nodes = adj_matrix.shape
if order == 'first' and (n_nodes != m_nodes):
raise ValueError('For first order similarity, the adjacency matrix must be square.')
total_weight = adj_matrix.data.sum()
# out-degree vector
dou = adj_matrix.dot(np.ones(m_nodes))
# in-degree vector
din = adj_matrix.T.dot(np.ones(n_nodes))
pdhou, pdhin = np.zeros(n_nodes), np.zeros(m_nodes)
pdhou[dou.nonzero()] = 1 / np.sqrt(dou[dou.nonzero()])
pdhin[din.nonzero()] = 1 / np.sqrt(din[din.nonzero()])
normalization = np.linalg.norm(embedding.T * np.sqrt(dou)) ** 2
if order == 'first':
fit = (np.multiply(embedding, adjacency_matrix.dot(embedding))).sum()
fit /= total_weight * (np.linalg.norm(embedding) ** 2 / n_nodes)
elif order == 'second':
fit = np.linalg.norm(adj_matrix.T.dot(embedding).T * pdhin) ** 2 / normalization
else:
raise ValueError('The similarity order should be \'first\' or \'second\'.')
diversity = (np.linalg.norm(embedding.T.dot(dou))) ** 2 / total_weight
diversity = 1 - diversity / normalization
if return_all:
return fit, diversity
else:
if np.isclose(fit, 0.) or np.isclose(diversity, 0.):
return 0.
else:
return hmean([fit, diversity])
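if __name__ == '__main__':
    # Smoke test (added for illustration; the triangle graph and random
    # embedding are assumptions, not reference values).
    rng = np.random.RandomState(0)
    adjacency = sparse.csr_matrix(np.array([[0., 1., 1.],
                                            [1., 0., 1.],
                                            [1., 1., 0.]]))
    embedding = rng.rand(3, 2)
    print('dot_modularity:', dot_modularity(adjacency, embedding))
    print('hscore:', hscore(adjacency, embedding))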
| null |
sknetwork/embedding/metrics.py
|
metrics.py
|
py
| 5,458 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.ndarray",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.isspmatrix",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.isspmatrix",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "numpy.isclose",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "scipy.stats.hmean",
"line_number": 143,
"usage_type": "call"
}
] |
234598352
|
from __future__ import print_function
from __future__ import division
import os
import sys
import torch
import torch.nn as nn
import numpy as np
import scipy as sp
import torch.nn.functional as F
from torch.autograd import Variable
import re
from collections import namedtuple, defaultdict
try:
    from collections.abc import Mapping, Sequence  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Mapping, Sequence
from functools import partial, reduce
from itertools import product
import numbers
import operator
import warnings
import six
# assumed to come from scikit-learn; ParameterSampler below needs this helper
# but this module does not define it
from sklearn.utils.random import sample_without_replacement
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.prod(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
n_iter = self.n_iter
if grid_size < n_iter:
warnings.warn(
'The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For exhaustive '
'searches, use GridSearchCV.'
% (grid_size, self.n_iter, grid_size), UserWarning)
n_iter = grid_size
for i in sample_without_replacement(grid_size, n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
#if sp_version < (0, 16):
# params[k] = v.rvs()
#else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if (isinstance(v, six.string_types) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
| null |
utils/misc.py
|
misc.py
|
py
| 11,080 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.random",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.integer",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.random",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "collections.Mapping",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "itertools.product",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 115,
"usage_type": "argument"
},
{
"api_name": "operator.mul",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "itertools.product",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.product",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "six.moves.range",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "six.moves",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "collections.Sequence",
"line_number": 281,
"usage_type": "name"
}
] |
158691299
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author : Xfell
# Email: [email protected]
# File Name:app
# Date:2018/3/26
from flask import Flask,render_template,abort
import os
import json
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
class File(object):
directory = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'files')
def __init__(self):
self._file = self._read_all_file()
def _read_all_file(self):
file = {}
for files in os.listdir(self.directory):
    files_abs = os.path.join(self.directory, files)
with open(files_abs,'r') as f:
file[files[:-5]] = json.load(f)
return file
def get_title_list(self):
return [ item['title'] for item in self._file.values()]
def get_content_list(self,filename):
return self._file.get(filename)
file = File()
@app.route('/')
def index():
title_list = file.get_title_list()
return render_template('index.html',title_list=title_list)
@app.route('/files/<filename>')
def files(filename):
content_list = file.get_content_list(filename)
if not content_list:
abort(404)
return render_template('file.html',content_list=content_list)
@app.errorhandler(404)
def not_found(e):
return render_template('404.html'),404
if __name__ == '__main__':
app.run()
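# Expected data layout (assumed from the File class above; the repo does not
# document it): each JSON file under files/ must provide at least a "title"
# key, and its full contents are passed to file.html, e.g.
#
#     files/hello.json -> {"title": "Hello", "content": ["..."]}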
| null |
challenge_6/app.py
|
app.py
|
py
| 1,366 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 51,
"usage_type": "call"
}
] |
115987368
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ArtifactSourceFragment(Resource):
"""Properties of an artifact source.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param display_name: The artifact source's display name.
:type display_name: str
:param uri: The artifact source's URI.
:type uri: str
:param source_type: The artifact source's type. Possible values include:
'VsoGit', 'GitHub'
:type source_type: str or ~azure.mgmt.devtestlabs.models.SourceControlType
:param folder_path: The folder containing artifacts.
:type folder_path: str
:param arm_template_folder_path: The folder containing Azure Resource
Manager templates.
:type arm_template_folder_path: str
:param branch_ref: The artifact source's branch reference.
:type branch_ref: str
:param security_token: The security token to authenticate to the artifact
source.
:type security_token: str
:param status: Indicates if the artifact source is enabled (values:
Enabled, Disabled). Possible values include: 'Enabled', 'Disabled'
:type status: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'uri': {'key': 'properties.uri', 'type': 'str'},
'source_type': {'key': 'properties.sourceType', 'type': 'str'},
'folder_path': {'key': 'properties.folderPath', 'type': 'str'},
'arm_template_folder_path': {'key': 'properties.armTemplateFolderPath', 'type': 'str'},
'branch_ref': {'key': 'properties.branchRef', 'type': 'str'},
'security_token': {'key': 'properties.securityToken', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, location=None, tags=None, display_name=None, uri=None, source_type=None, folder_path=None, arm_template_folder_path=None, branch_ref=None, security_token=None, status=None, provisioning_state=None, unique_identifier=None):
super(ArtifactSourceFragment, self).__init__(location=location, tags=tags)
self.display_name = display_name
self.uri = uri
self.source_type = source_type
self.folder_path = folder_path
self.arm_template_folder_path = arm_template_folder_path
self.branch_ref = branch_ref
self.security_token = security_token
self.status = status
self.provisioning_state = provisioning_state
self.unique_identifier = unique_identifier
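# Illustrative construction (not part of the generated file): fragments carry
# only the fields being changed, e.g. to enable an artifact source:
#
#     fragment = ArtifactSourceFragment(status='Enabled')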
| null |
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/artifact_source_fragment.py
|
artifact_source_fragment.py
|
py
| 4,157 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "resource.Resource",
"line_number": 15,
"usage_type": "name"
}
] |
27736703
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Question: Is there a correlation between the relationships between the victim and the perpetrator?
dataArr = pd.read_csv("../data/database.csv")
# remove these columns
dataArr = (dataArr.drop(['Record ID', 'Agency Code','Agency Name','Agency Type','City', 'State', 'Year','Month', 'Incident', 'Crime Type', 'Crime Solved'],axis=1))
# print(dataArr.head(n=1))
# remove rows where the relationship is unknown
dataArr = dataArr[dataArr["Relationship"] != "Unknown"]
def condition(value):
if value != "Acquaintance" and value != "Stranger":
return "Family"
return value
dataArr['Relationship'] = dataArr['Relationship'].apply(condition)
# get the count of each unique value in Relationship and sort
grouped = dataArr.groupby("Relationship").size().reset_index()
grouped = grouped.sort_values(0, ascending=False)
print(grouped)
# plot the result
plt.pie(grouped[0], labels=grouped["Relationship"], autopct='%.2f')
# plt.ylabel("Homicides")
# plt.xlabel("Relationship")
plt.title("Homicides By Relationship Type")
# plt.tight_layout()
# Note, save your output to the plots folder. name it something
plt.savefig('../plots/q2_relationship_2.png')
| null |
scripts/q2_relationship_2.py
|
q2_relationship_2.py
|
py
| 1,237 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
}
] |
565420388
|
from flask import jsonify, request, abort, Response, render_template
from flight import app
from extractor import getData, filterResults
from datetime import datetime
import json
import requests
from weather import getWeather
url = "http://partners.api.skyscanner.net/apiservices/"
@app.route('/<path:path>', methods=['GET'])
def index(path):
    return app.send_static_file(path)
@app.route('/flight/<country>/<currency>/<locale>/<originPlace>/<destinationPlace>/<inboundDate>', methods=['GET'])
@app.route('/flight/<country>/<currency>/<locale>/<originPlace>/<destinationPlace>/<outboundDate>/<inboundDate>', methods=['GET'])
def flight(country, currency, locale, originPlace, destinationPlace, inboundDate, outboundDate = None):
if request.args:
api = request.args.get('apiKey')
if not api:
return jsonify({ 'error': 'Missing API Key' })
adults = request.args.get('adults')
if not adults:
return jsonify({ 'error': 'Missing Adults attributes' })
children = request.args.get('children')
if not children or not 0 <= int(children) < 16:
return jsonify({ 'error': 'Incorrect children attributes' })
infants = request.args.get('infants')
if not infants or not 0 <= int(infants) < 16:
return jsonify({ 'error': 'Incorrect infants attributes' })
# The cabin class. Can be “economy”, “premiumeconomy”, “business”, “first”
cabinClass = request.args.get('cabinClass')
if not cabinClass or cabinClass.lower() not in ['economy', "premiumeconomy", "business", "first"]:
return jsonify({ 'error': 'Incorrect cabinClass attributes' })
minLayover = request.args.get('minLayover')
if not minLayover or int(minLayover) < 0:
return jsonify({ 'error': 'Incorrect minLayover attributes' })
includeCarriers = request.args.get('includeCarriers')
excludeCarriers = request.args.get('excludeCarriers')
groupPricing = request.args.get('groupPricing')
data = getData(originPlace,destinationPlace,inboundDate,int(adults),int(children),int(infants),cabinClass,int(minLayover))
# return jsonify(data)
return render_template('index.html', flights=data)
@app.route('/<originPlace>/<inboundDate>/<destinationPlace>/<layover>/<adults>/<children>/<infants>/<cabinClass>', methods=['GET'])
def fly(originPlace, inboundDate, destinationPlace, layover, adults, children, infants, cabinClass):
# originPlace = originPlace.lower()
# destinationPlace = destinationPlace.lower()
layover = int(layover) * 60
if not 0 <= int(children) < 16:
    return jsonify({ 'error': 'Incorrect children attributes' })
if not 0 <= int(infants) < 16:
    return jsonify({ 'error': 'Incorrect infants attributes' })
cabinClass = cabinClass.lower()
cabinClass = cabinClass.replace(" class", "")
if cabinClass not in ['economy', "premiumeconomy", "business", "first"]:
return jsonify({ 'error': 'Incorrect cabinClass attributes' })
data = getData(originPlace,destinationPlace,inboundDate,int(adults),int(children),int(infants),cabinClass,int(layover))
return jsonify(data)
@app.route('/<originPlace>/<month>/<day>/<year>/<destinationPlace>/<layover>/<adults>/<children>/<infants>/<cabinClass>', methods=['GET'])
def price(originPlace, month, day, year, destinationPlace, layover, adults, children, infants, cabinClass):
# originPlace = originPlace.lower()
# destinationPlace = destinationPlace.lower()
layover = int(layover) * 60
inboundDate = year + '-' + month + '-' + day
if not 0 <= int(children) < 16:
    return jsonify({ 'error': 'Incorrect children attributes' })
if not 0 <= int(infants) < 16:
    return jsonify({ 'error': 'Incorrect infants attributes' })
cabinClass = cabinClass.lower()
cabinClass = cabinClass.replace(" class", "")
if cabinClass not in ['economy', "premiumeconomy", "business", "first"]:
return jsonify({ 'error': 'Incorrect cabinClass attributes' })
data = getData(originPlace,destinationPlace,inboundDate,int(adults),int(children),int(infants),cabinClass,int(layover))
return jsonify(data)
@app.route('/weather/<airport>/<date>', methods=['GET'])
def weather(date, airport):
return jsonify(getWeather(date, airport))
@app.route('/suggest/<country>/<currency>/<locale>', methods=['GET'])
def suggest(country, currency, locale):
if request.args:
query = request.args.get('query')
api = request.args.get('api')
if not api:
return jsonify({ 'error': 'Missing API Key' })
if not query or len(query) < 2:
return jsonify({ 'error': 'Incorrect Query' })
suggestions = get_suggestions(country, currency, locale, query, api)
return jsonify({ 'suggestions': suggestions })
def get_suggestions(country, currency, locale, query, api):
suggest = requests.get(url+"autosuggest/v1.0/"+country+"/"+currency+"/"+locale+"?query="+query+"&apiKey="+api)
suggestJSON = json.loads(suggest.text)
return [{'PlaceId':x['PlaceId'],'PlaceName':x['PlaceName']} for x in suggestJSON['Places']]
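# Example request/response shape (hypothetical values, derived from the route
# and return statement above):
#
#     GET /suggest/UK/GBP/en-GB?query=Lond&api=<skyscanner-key>
#     -> {"suggestions": [{"PlaceId": "...", "PlaceName": "London"}, ...]}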
| null |
src/flight/routes.py
|
routes.py
|
py
| 5,156 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flight.app.send_from_directory",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flight.app.route",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "extractor.getData",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flight.app.route",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flight.app.route",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "extractor.getData",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flight.app.route",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "extractor.getData",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "flight.app.route",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "weather.getWeather",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "flight.app.route",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "flight.app.route",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "flight.app",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 97,
"usage_type": "call"
}
] |
207201112
|
from app.common.toolbox import doRender
from google.appengine.ext import db
from app.model import *
import datetime
from datetime import date
from app.base_handler import BaseHandler
from app.common.voluptuous import *
import json
import base64
import re
class GetUserHandler(BaseHandler):
def get(self, user_id):
self.auth()
user = User.get_by_id(int(user_id))
user.created_str = user.created.strftime('%B %dth, %Y')
doRender(self, 'view_user.html', {
'user': user
})
class EditUserHandler(BaseHandler):
def get(self, user_id):
self.auth()
user = self.current_user()
if not user.key().id() == int(user_id):
self.redirect('/user/' + user_id)
return None
else:
doRender(self, 'edit_user.html', {
'user': user
})
def post(self, user_id):
data_pattern = re.compile('data:image/(png|jpeg);base64,(.*)$')
json_str = self.request.body
data = json.loads(json_str)
self.auth()
user = self.current_user()
if not user.key().id() == int(user_id):
self.redirect('/user/' + user_id)
return None
else:
if data['photo'] is not None and len(data['photo']) > 0:
    # strip an optional data-URL prefix (see data_pattern above) before decoding
    match = data_pattern.match(data['photo'])
    photo_b64 = match.group(2) if match else data['photo']
    user.photo = db.Blob(base64.b64decode(photo_b64))
user.put()
class UserHandler(BaseHandler):
def get(self):
self.auth()
user = self.current_user()
doRender(self, 'view_user.html', {
'user': user
})
| null |
app/controllers/users.py
|
users.py
|
py
| 1,341 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "app.base_handler.BaseHandler",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "app.common.toolbox.doRender",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "app.base_handler.BaseHandler",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "app.common.toolbox.doRender",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db.Blob",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "base64.b64decode",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "app.base_handler.BaseHandler",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "app.common.toolbox.doRender",
"line_number": 61,
"usage_type": "call"
}
] |
412228127
|
import sys
import cv2
import numpy as np
import math
from random import shuffle
import selectinwindow
# Set recursion limit
sys.setrecursionlimit(10 ** 9)
drawing = False
xi, yi = -1, -1
B = [i for i in range(256)]
G = [i for i in range(256)]
R = [i for i in range(256)]
def nothing(x):
pass
# cv2.createTrackbar("HUE_MIN", "image", 0, 255, nothing)
# cv2.createTrackbar("HUE_MAX", "image", 0, 255, nothing)
def onMouse(event, x, y, flags, frame):
global xi, yi, drawing, B, G, R
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
xi, yi = x, y
shuffle(B)
shuffle(G)
shuffle(R)
# elif event == cv2.EVENT_MOUSEMOVE:
# if drawing:
# cv2.rectangle(frame, (xi, yi), (x, y), (B[0], G[0], R[0]), -1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
cv2.rectangle(frame, (xi, yi), (x, y), (B[0], G[0], R[0]), 3)
# frame = np.zeros((512, 512, 3), np.uint8)
rgb_image = np.load("rgb_image_raw.npy")
rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
cv2.namedWindow("frame")
cv2.setMouseCallback("frame", onMouse, param=rgb_image)
while True:
cv2.imshow("frame", rgb_image)
key = cv2.waitKey(1)
if key == 27:
break
# h_min = cv2.getTrackbarPos("HUE_MIN", "image")
# h_max = cv2.getTrackbarPos("HUE_MAX", "image")
cv2.destroyAllWindows()
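# Live-preview variant (illustrative): to show the rectangle while dragging,
# re-enable the EVENT_MOUSEMOVE branch but draw on a copy of the frame each
# time so previews do not accumulate:
#
#     preview = rgb_image.copy()
#     cv2.rectangle(preview, (xi, yi), (x, y), (B[0], G[0], R[0]), 1)
#     cv2.imshow("frame", preview)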
| null |
mouse_event.py
|
mouse_event.py
|
py
| 1,357 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.setrecursionlimit",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_LBUTTONDOWN",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_LBUTTONUP",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "cv2.namedWindow",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.setMouseCallback",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 56,
"usage_type": "call"
}
] |
571018383
|
from redis import StrictRedis
if __name__ == '__main__':
    # Create a StrictRedis object and connect to the Redis database
    try:
        sr = StrictRedis()
        # Add the key 'name' with the value 'simon'
        # res = sr.set('name', 'simon')
        # print(res)
        # Change the value of 'name' to 'chou'
        # res = sr.set('name', 'chou')
        # Get the value of 'name'
        # res = sr.get('name')
        # print(res)
        # Delete 'name' and its corresponding value
        # res = sr.delete('name')
        # print(res)
        # List all keys
res = sr.keys()
print(res)
except Exception as e:
print(e)
| null |
redis_test/redis_string.py
|
redis_string.py
|
py
| 629 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "redis.StrictRedis",
"line_number": 6,
"usage_type": "call"
}
] |
531811695
|
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from neural_nets.data_utils import *
import numpy as np  # np.random is used below; imported explicitly rather than relying on the wildcard import
from tensorflow.keras import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import LearningRateScheduler
import os
import config
import matplotlib.pyplot as plt
def init_digits_CNN_model():
num_classes = 10
input_shape = (28, 28, 1)
initializer = tf.initializers.VarianceScaling(scale=2.0)
layers = [
tf.keras.layers.Conv2D(input_shape=input_shape, filters=32, kernel_size=(3, 3),
activation='relu', strides=(1, 1), padding='same',
kernel_initializer=initializer),
tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu',
strides=(1, 1), padding='same',
kernel_initializer=initializer),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='same'),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu',
kernel_initializer=initializer),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(num_classes, activation='softmax',
kernel_initializer=initializer)
]
model = tf.keras.Sequential(layers)
return model
def init_digits_CNN_model_2():
num_classes = 10
input_shape = (28, 28, 1)
initializer = tf.initializers.VarianceScaling(scale=2.0)
model = Sequential()
model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)))
# model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=3, activation='relu'))
# model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu'))
# model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size=3, activation='relu'))
# model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=3, activation='relu'))
# model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'))
# model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size=4, activation='relu'))
# model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
def optimizer_init_fn(learning_rate):
# optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, nesterov=True, momentum=0.9)
# optimizer = tf.keras.optimizers.Adagrad(learning_rate=learning_rate)
# optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
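    # NOTE: the learning_rate argument is unused here; Adam's default LR applies and is
    # overridden per epoch by the LearningRateScheduler callback defined below.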
optimizer = tf.keras.optimizers.Adam()
return optimizer
def print_learning_curves(loss_history, train_acc_history, val_acc_history):
    # Plot the loss curve and the training/validation accuracy
plt.subplot(2, 1, 1)
plt.plot(loss_history)
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(train_acc_history, label='train')
plt.plot(val_acc_history, label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
    plt.ylabel('Classification accuracy')
plt.legend()
plt.show()
def generate_random_hyperparams(lr_min, lr_max, reg_min, reg_max):
lr = 10 ** np.random.uniform(lr_min, lr_max)
reg = 10 ** np.random.uniform(reg_min, reg_max)
return lr, reg
def update_hyper_params(params, lr, reg):
params["lr"] = lr
params["reg"] = reg
return params
x_train, y_train, x_val, y_val, x_test, y_test = get_mnist_digits_data_TF()
datagen = ImageDataGenerator(rotation_range=10,
zoom_range=0.10,
width_shift_range=0.1,
height_shift_range=0.1)
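# Anneal the learning rate: start at 1e-3 and decay by 5% each epoch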
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
lr, reg = generate_random_hyperparams(-4, -2, -6, -2)
optimizer = optimizer_init_fn(lr)
model = init_digits_CNN_model_2()
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=[tf.keras.metrics.sparse_categorical_accuracy])
train_generated_data = datagen.flow(x_train, y_train, batch_size=64)
history = model.fit_generator(train_generated_data,
epochs=10, steps_per_epoch=x_train.shape[0] // 64,
validation_data=(x_val, y_val),
callbacks=[annealer])
loss_history = history.history['loss']
train_acc = history.history['sparse_categorical_accuracy'][-1]
val_acc = history.history['val_sparse_categorical_accuracy'][-1]
print_learning_curves(loss_history, history.history['sparse_categorical_accuracy'], history.history['val_sparse_categorical_accuracy'])
print("-------------------------------------------------------------")
print("train_accuracy = {0}, val_accuracy={1}".format(train_acc, val_acc))
print("lr = {0}, reg={1}".format(lr, reg))
print("-------------------------------------------------------------")
score = model.evaluate(x_test, y_test, verbose=0)
print(score)
# model.save(os.path.join(config.MODELS_DIR, config.digits_CNN_DataGen_tf))
# model = load_model(os.path.join(config.MODELS_DIR, config.digits_CNN_DataGen_tf))
# score = model.evaluate(x_test, y_test, verbose=0)
# print(score)
| null |
neural_nets/tensorflow_models/tf_digits_CNN_Data_Gen.py
|
tf_digits_CNN_Data_Gen.py
|
py
| 5,671 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.initializers.VarianceScaling",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.initializers",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.initializers.VarianceScaling",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tensorflow.initializers",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.LearningRateScheduler",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 133,
"usage_type": "attribute"
}
] |
558568422
|
"""
The :mod:`spirograph.main` module contains the :func:`main` function.
The :func:`main` function, along with its associated helper functions, makes it easy to draw multiple
spirographs from the command line.
"""
import argparse
import turtle
from spirograph.my_spiro import Spirograph
from spirograph.my_spiro_animator import SpirographsAnimator
description_string = """
This program draws spirographs using the Turtle module.
When run with no arguments, this program draws random spirographs.
Terminology:
R: radius of outer circle.
r: radius of inner circle.
l: ratio of hole distance to r.
"""
"""
str: The description string that will show before the argument parser.
"""
parser_name_or_flag = "--sparams"
"""
str: The name of the argument used in the parser.
"""
parser_destination = "sparams"
"""
str: The name of the destination for the argument parser.
"""
number_of_arguments = 3
"""
int: The number of arguments to pass into the parser.
"""
helper_string = """
The three arguments in sparams: R, r, l.
"""
"""
str: The help text shown for the --sparams argument.
"""
screen_width = 0.8
"""
float: The proportion of the screen width to use for the turtle window.
Can be any value :math:`x`, where :math:`0.0 \\leq x \\leq 1.0`
"""
turtle_shape = 'turtle'
"""
str: The shape of the turtle.
"""
title = "Spirographs!"
"""
str: The title of the turtle window.
"""
def _argument_parsing():
"""
Returns a parser that will parse arguments.
Returns
-------
argparse.Namespace
Argument parser.
"""
print('Generating spirograph...')
parser = argparse.ArgumentParser(description=description_string)
parser.add_argument(
parser_name_or_flag,
        nargs=number_of_arguments,
dest=parser_destination,
required=False,
help=helper_string
)
args = parser.parse_args()
return args
def _prepare_turtle():
"""
Prepare the turtle window.
Returns
-------
None
"""
turtle.setup(width=screen_width)
turtle.shape(turtle_shape)
turtle.title(title)
def _if_else_statement(args=None):
"""
Draw a specific number of spirographs if a command line option is given.
Otherwise, draw 4 spirographs.
Parameters
----------
args : argparse.Namespace, optional
Optional command line arguments.
Returns
-------
None
"""
if args.sparams:
parameters = [float(x) for x in args.sparams]
color = (0.0, 0.0, 0.0)
spirograph = Spirograph(0, 0, color, *parameters)
spirograph.draw()
else:
spirographs_animator = SpirographsAnimator(4)
turtle.onkey(spirographs_animator.toggle_visible_turtles, "t")
turtle.onkey(spirographs_animator.restart, "space")
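        # NOTE: turtle.listen() is never called here, so these key bindings will not receive events.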
def main():
"""
Run the spirographs.
Returns
-------
None
"""
args = _argument_parsing()
_prepare_turtle()
_if_else_statement(args)
turtle.mainloop()
"""
def main():
# use sys.argv if needed
print('Generating spirograph...')
# create parser
descStr = \"""This program draws spirographs using the Turtle module.
When run with no arguments, this program draws random spirographs.
Terminology:
R: radius of outer circle.
r: radius of inner circle.
l: ratio of hole distance to r.
\"""
parser = argparse.ArgumentParser(description = descStr)
# add expected arguments
parser.add_argument(
'--sparams', nargs = 3, dest = 'sparams', required = False,
help = "The three arguments in sparams: R, r, l."
)
# parse args
args = parser.parse_args()
# set to 80% screen width
turtle.setup(width = 0.8)
# set cursor shape
turtle.shape('turtle')
# set title
turtle.title("Spirographs!")
# add key handler for saving images
# turtle.onkey(saveDrawing, "s")
# start listening
# turtle.listen()
# hide main turtle cursor
turtle.hideturtle()
# checks args and draw
if args.sparams:
parameters = [float(x) for x in args.sparams]
# draw spirograph with given parameters
# black by default
col = (0.0, 0.0, 0.0)
spirograph = Spirograph(0, 0, col, *parameters)
spirograph.draw()
else:
# create animator object
spirograph_animator = SpirographsAnimator(4)
# add key handler to toggle turtle cursor
turtle.onkey(spirograph_animator.toggle_visible_turtles, "t")
# add key handler to restart animation
# turtle.onkey(spirograph_animator.restart, "space")
# start turtle main loop
turtle.mainloop()
"""
if __name__ == "__main__":
main()
| null |
spirograph/main.py
|
main.py
|
py
| 4,702 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "turtle.setup",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "turtle.shape",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "turtle.title",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "spirograph.my_spiro",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "spirograph.my_spiro.Spirograph",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "spirograph.my_spiro.draw",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "spirograph.my_spiro",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "spirograph.my_spiro_animator.SpirographsAnimator",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "turtle.onkey",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "turtle.onkey",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "turtle.mainloop",
"line_number": 138,
"usage_type": "call"
}
] |
107530508
|
from jesusanaya_com.models import get_engine, get_session_factory, get_tm_session
from jesusanaya_com.models.meta import Base
import pytest
import transaction
@pytest.fixture(scope="session")
def db_engine(request):
engine = get_engine({"sqlalchemy.url": "sqlite:///:memory:"})
Base.metadata.create_all(engine)
def destroy_db():
transaction.abort()
Base.metadata.drop_all(engine)
request.addfinalizer(destroy_db)
return engine
@pytest.fixture(scope="session")
def dbsession(db_engine):
session_factory = get_session_factory(db_engine)
session = get_tm_session(session_factory, transaction.manager)
return session
| null |
jesusanaya_com/tests/conftest.py
|
conftest.py
|
py
| 667 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "jesusanaya_com.models.get_engine",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "jesusanaya_com.models.meta.Base.metadata.create_all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "jesusanaya_com.models.meta.Base.metadata",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "jesusanaya_com.models.meta.Base",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "transaction.abort",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jesusanaya_com.models.meta.Base.metadata.drop_all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "jesusanaya_com.models.meta.Base.metadata",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "jesusanaya_com.models.meta.Base",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "jesusanaya_com.models.get_session_factory",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jesusanaya_com.models.get_tm_session",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "transaction.manager",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 20,
"usage_type": "call"
}
] |
209801353
|
#!/usr/bin/env python
import sys
import os
from shutil import copyfile
import time
import itertools
import torch
from torch.autograd import Variable
import torchvision.utils as vutils
from options import TrainOptions, create_sub_dirs
from data import load, AlignedIterator, UnalignedIterator
from model import StochCycleGAN, AugmentedCycleGAN
import numpy as np
import shutil
import random
import glob
def copy_scripts_to_folder(expr_dir):
dir_path = os.path.dirname(os.path.realpath(__file__))
for f in glob.glob("%s/*.py" % dir_path):
shutil.copy(f, expr_dir)
def print_log(out_f, message):
out_f.write(message+"\n")
out_f.flush()
print(message)
def format_log(epoch, i, errors, t, prefix=True):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
if not prefix:
message = ' ' * len(message)
for k, v in list(errors.items()):
message += '%s: %.3f ' % (k, v)
return message
def visualize_cycle(opt, real_A, visuals, eidx, uidx, train):
size = real_A.size()
images = [img.cpu().unsqueeze(1) for img in list(visuals.values())]
vis_image = torch.cat(images, dim=1).view(size[0]*len(images),size[1],size[2],size[3])
if train:
save_path = opt.train_vis_cycle
else:
save_path = opt.vis_cycle
save_path = os.path.join(save_path, 'cycle_%02d_%04d.png' % (eidx, uidx))
vutils.save_image(vis_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=len(images))
copyfile(save_path, os.path.join(opt.vis_latest, 'cycle.png'))
def visualize_multi(opt, real_A, model, eidx, uidx):
size = real_A.size()
# all samples in real_A share the same prior_z_B
multi_prior_z_B = Variable(real_A.data.new(opt.num_multi,
opt.nlatent, 1, 1).normal_(0, 1).repeat(size[0],1,1,1), volatile=True)
multi_fake_B = model.generate_multi(real_A.detach(), multi_prior_z_B)
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_A.data.cpu().unsqueeze(1), multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+1),size[1],size[2],size[3])
save_path = os.path.join(opt.vis_multi, 'multi_%02d_%04d.png' % (eidx, uidx))
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+1)
copyfile(save_path, os.path.join(opt.vis_latest, 'multi.png'))
def visualize_inference(opt, real_A, real_B, model, eidx, uidx):
size = real_A.size()
real_B = real_B[:opt.num_multi]
# all samples in real_A share the same post_z_B
multi_fake_B = model.inference_multi(real_A.detach(), real_B.detach())
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_A.data.cpu().unsqueeze(1), multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+1),size[1],size[2],size[3])
vis_multi_image = torch.cat([torch.ones(1, size[1], size[2], size[3]).cpu(), real_B.data.cpu(),
vis_multi_image.cpu()], dim=0)
save_path = os.path.join(opt.vis_inf, 'inf_%02d_%04d.png' % (eidx, uidx))
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+1)
copyfile(save_path, os.path.join(opt.vis_latest, 'inf.png'))
def train_model():
opt = TrainOptions().parse(sub_dirs=['vis_multi','vis_cycle','vis_latest','train_vis_cycle'])
out_f = open("%s/results.txt" % opt.expr_dir, 'w')
copy_scripts_to_folder(opt.expr_dir)
use_gpu = len(opt.gpu_ids) > 0
if opt.seed is not None:
print(("using random seed:", opt.seed))
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
if use_gpu:
torch.cuda.manual_seed_all(opt.seed)
if opt.numpy_data:
trainA, trainB, _, _ = load(opt.dataroot)
train_dataset = UnalignedIterator(trainA, trainB, batch_size=opt.batchSize)
print_log(out_f, '#training images = %d' % len(train_dataset))
else:
raise NotImplementedError
if opt.supervised:
if opt.numpy_data:
sup_size = opt.sup_num
sup_trainA = trainA[:sup_size]
sup_trainB = trainB[:sup_size]
sup_train_dataset = AlignedIterator(sup_trainA, sup_trainB, batch_size=opt.batchSize)
else:
raise NotImplementedError
sup_train_dataset = itertools.cycle(sup_train_dataset)
print_log(out_f, '#supervised images = %d' % sup_size)
# create_model
if opt.model == 'stoch_cycle_gan':
model = StochCycleGAN(opt)
elif opt.model == 'cycle_gan':
model = StochCycleGAN(opt, ignore_noise=True)
elif opt.model == 'aug_cycle_gan':
model = AugmentedCycleGAN(opt)
create_sub_dirs(opt, ['vis_inf'])
vis_inf = True
else:
raise NotImplementedError('Specified model is not implemented.')
print_log(out_f, "model [%s] was created" % (model.__class__.__name__))
# visualizer = Visualizer(opt)
total_steps = 0
print_start_time = time.time()
create_sub_dirs(opt, ['vis_pred_B'])
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
print("EPOCH " + str(epoch))
epoch_start_time = time.time()
epoch_iter = 0
for i, data in enumerate(train_dataset):
real_A, real_B = Variable(data['A']), Variable(data['B'])
if real_A.size(0) != real_B.size(0):
continue
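            # Sample a fresh latent code z_B ~ N(0, 1) for each example in the batch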
prior_z_B = Variable(real_A.data.new(real_A.size(0), opt.nlatent, 1, 1).normal_(0, 1))
total_steps += opt.batchSize
epoch_iter += opt.batchSize
if use_gpu:
real_A = real_A.cuda()
real_B = real_B.cuda()
prior_z_B = prior_z_B.cuda()
if opt.monitor_gnorm:
losses, visuals, gnorms = model.train_instance(real_A, real_B, prior_z_B)
else:
losses, visuals = model.train_instance(real_A, real_B, prior_z_B)
# supervised training
if opt.supervised:
sup_data = next(sup_train_dataset)
sup_real_A, sup_real_B = Variable(sup_data['A']), Variable(sup_data['B'])
if use_gpu:
sup_real_A, sup_real_B = sup_real_A.cuda(), sup_real_B.cuda()
sup_losses = model.supervised_train_instance(sup_real_A, sup_real_B, prior_z_B)
if total_steps % opt.display_freq == 0:
print("VISUALIZING")
# visualize current training batch
visualize_cycle(opt, real_A, visuals, epoch, epoch_iter/opt.batchSize, train=True)
if total_steps % opt.print_freq == 0:
t = (time.time() - print_start_time) / opt.batchSize
print_log(out_f, format_log(epoch, epoch_iter, losses, t))
if opt.supervised:
print_log(out_f, format_log(epoch, epoch_iter, sup_losses, t, prefix=False))
if opt.monitor_gnorm:
print_log(out_f, format_log(epoch, epoch_iter, gnorms, t, prefix=False)+"\n")
print_start_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print_log(out_f, 'saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save('latest')
print_log(out_f, 'End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
if epoch > opt.niter:
model.update_learning_rate()
out_f.close()
if __name__ == "__main__":
train_model()
| null |
edges2shoes_exp/train.py
|
train.py
|
py
| 7,795 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "shutil.copyfile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "model.generate_multi",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "shutil.copyfile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "model.inference_multi",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "shutil.copyfile",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "options.TrainOptions",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "data.load",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "data.UnalignedIterator",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "data.AlignedIterator",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "model.StochCycleGAN",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "model.StochCycleGAN",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "model.AugmentedCycleGAN",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "options.create_sub_dirs",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "model.__class__",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "options.create_sub_dirs",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "model.train_instance",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "model.train_instance",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "model.supervised_train_instance",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "model.save",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "model.update_learning_rate",
"line_number": 194,
"usage_type": "call"
}
] |
233156173
|
import pytest
import numpy
import pandas
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true",
help="skip slow tests")
parser.addoption("--skip-network", action="store_true",
help="run network tests")
parser.addoption("--only-slow", action="store_true",
help="run only slow tests")
def pytest_runtest_setup(item):
if 'slow' in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if 'slow' not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if 'network' in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
# For running doctests: make np and pd names available
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
doctest_namespace['np'] = numpy
doctest_namespace['pd'] = pandas
| null |
pandas/conftest.py
|
conftest.py
|
py
| 1,007 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.skip",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.skip",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.skip",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 29,
"usage_type": "call"
}
] |
127275463
|
import json
import sys
import textwrap
from engine import GameEngine
COLUMNS = 80
class Application:
def __init__(self, game, use_audio=True):
if use_audio:
print('Loading hardware ...')
from farmer_says import FarmerSays
print('... done')
self._hardware = FarmerSays()
self._hardware.start_audio_thread()
else:
self._hardware = None
with open('public/' + game + '/ROOMS.json') as f:
rooms = json.load(f)
with open('public/' + game + '/OBJECTS.json') as f:
objects = json.load(f)
self._engine = GameEngine(objects, rooms)
if self._hardware:
self._hardware.set_audio_path('public/' + game + '/audio/')
def print_long(self, txt):
paras = txt.split('\n')
for para in paras:
print(textwrap.fill(para.strip(), COLUMNS))
def play_prompts(self, prs):
if not self._hardware:
return
# stop any existing audio
self._hardware.stop_audio()
for pr in prs:
self._hardware.queue_prompt([pr, 0.25])
def console_loop(self):
known = ['NORTH', 'SOUTH', 'EAST', 'WEST', 'ACTION', 'LOOK',
'GETLEFT', 'GETRIGHT',
'DROPLEFT', 'DROPRIGHT',
'USELEFT', 'USERIGHT']
replace = {
'E': 'EAST',
'W': 'WEST',
'N': 'NORTH',
'S': 'SOUTH'
}
first_cmd = True
while True:
while True:
if first_cmd:
cmd = 'LOOK'
first_cmd = False
else:
cmd = input('> ').upper().replace(' ', '')
if cmd in replace:
cmd = replace[cmd]
if cmd not in known:
print('## Unknown command:', cmd)
continue
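                # Translate the text command into the engine's button encoding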
cmd = self._engine.decode_button_command(cmd.upper())
prs = self._engine.process_command(cmd)
if prs:
aud_prs = self._engine.prompts_only(prs)
self.play_prompts(aud_prs)
prs = self._engine.text_only(prs)
self.print_long(prs)
if __name__ == '__main__':
use_audio = 'audio' in sys.argv
app = Application(sys.argv[1], use_audio)
app.console_loop()
| null |
src/app_console.py
|
app_console.py
|
py
| 2,469 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "farmer_says.FarmerSays",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "engine.GameEngine",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "textwrap.fill",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "{'FarmerSays': 'farmer_says.FarmerSays'}",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 92,
"usage_type": "attribute"
}
] |
468552044
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 15:52:19 2020
@author: Karan
"""
import time
import requests
import io
import os
import hashlib
from selenium import webdriver
from PIL import Image
def fetch_image_urls(query, max_links_to_fetch, wd, sleep_between_interactions=1):
def scroll_to_end(wd):
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(sleep_between_interactions)
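    # Google Images search URL template; the query fills both the q and oq parameters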
search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"
wd.get(search_url.format(q=query))
image_urls = set()
image_count = 0
results_start = 0
while image_count < max_links_to_fetch:
scroll_to_end(wd)
thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
number_results = len(thumbnail_results)
print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")
for img in thumbnail_results[results_start:number_results]:
try:
img.click()
time.sleep(sleep_between_interactions)
except Exception:
continue
actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
for actual_image in actual_images:
if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
image_urls.add(actual_image.get_attribute('src'))
image_count = len(image_urls)
if len(image_urls) >= max_links_to_fetch:
print(f"Found: {len(image_urls)} image links, done!")
break
else:
print("Found:", len(image_urls), "image links, looking for more ...")
time.sleep(30)
            return image_urls  # bail out, returning whatever links were collected so far
load_more_button = wd.find_element_by_css_selector(".mye4qd")
if load_more_button:
wd.execute_script("document.querySelector('.mye4qd').click();")
results_start = len(thumbnail_results)
return image_urls
def persist_image(folder_path,url):
try:
image_content = requests.get(url).content
except Exception as e:
print(f"ERROR - Could not download {url} - {e}")
try:
image_file = io.BytesIO(image_content)
image = Image.open(image_file).convert('RGB')
file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
with open(file_path, 'wb') as f:
image.save(f, "JPEG", quality=85)
print(f"SUCCESS - saved {url} - as {file_path}")
except Exception as e:
print(f"ERROR - Could not save {url} - {e}")
def search_and_download(search_term:str,driver_path:str,target_path='./images',number_images=5): #specify the name of the folder in which the images have to be saved in target_path
target_folder = os.path.join(target_path,'_'.join(search_term.lower().split(' ')))
if not os.path.exists(target_folder):
os.makedirs(target_folder)
with webdriver.Chrome(executable_path=driver_path) as wd:
res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)
for elem in res:
persist_image(target_folder,elem)
#ADD DRIVER PATH
#Chrome driver for Google Chrome version 87.* is present in the repository.
driver_path=r'chromedriver.exe'
#ALL SEARCH TERMS
search_terms=['eiffel tower',
'cristiano ronaldo',
'real madrid']
#Data stored in folder 'images'
for search_term in search_terms:
search_and_download(search_term, driver_path,number_images=10)
print('DOWNLOADED: ',search_term)
| null |
scrapper.py
|
scrapper.py
|
py
| 3,730 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "hashlib.sha1",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 96,
"usage_type": "name"
}
] |
536325315
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from functools import partial
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools.misc import formatLang
class AccountInvoice(models.Model):
_inherit = "account.invoice"
on_margin = fields.Boolean('Is on margin', compute='_compute_on_margin', store=True, oldname='is_margin')
amount_by_group_wo_margin_tax = fields.Binary(
compute='_amount_by_group_wo_margin_tax',
string='Tax amount by group (wo on margin tax)'
)
@api.one
@api.depends('invoice_line_ids.price_subtotal', 'tax_line_ids.amount',
'tax_line_ids.amount_rounding','currency_id', 'company_id',
'date_invoice', 'type'
)
def _compute_amount(self):
"""
"""
super(AccountInvoice, self)._compute_amount()
self.amount_tax = sum(
self.currency_id.round(line.amount_total)
for line in self.tax_line_ids
if not line.tax_id.on_margin
)
self.amount_total = self.amount_untaxed + self.amount_tax
@api.depends('invoice_line_ids')
def _compute_on_margin(self):
for invoice in self:
invoice.on_margin = bool(invoice.invoice_line_ids.filtered('invoice_line_tax_ids.on_margin'))
def _amount_by_group_wo_margin_tax(self):
"""
"""
for invoice in self:
fmt = partial(
formatLang,
invoice.with_context(lang=invoice.partner_id.lang).env,
currency_obj=invoice.currency_id or invoice.company_id.currency_id
)
res = {}
for line in invoice.tax_line_ids:
if line.tax_id.on_margin:
continue
res.setdefault(line.tax_id.tax_group_id, {'base': 0.0, 'amount': 0.0})
res[line.tax_id.tax_group_id]['amount'] += line.amount_total
res[line.tax_id.tax_group_id]['base'] += line.base
res = sorted(res.items(), key=lambda l: l[0].sequence)
invoice.amount_by_group_wo_margin_tax = [(
r[0].name, r[1]['amount'], r[1]['base'],
fmt(r[1]['amount']), fmt(r[1]['base']),
len(res),
) for r in res]
def _prepare_tax_line_vals(self, line, tax):
values = super(AccountInvoice, self)._prepare_tax_line_vals(line, tax)
if tax.get('on_margin'):
values['on_margin'] = True
return values
@api.multi
def get_taxes_values(self):
"""
"""
on_margin_invoices = self.filtered('on_margin')
result = super(AccountInvoice, self - on_margin_invoices).get_taxes_values()
for line in on_margin_invoices.invoice_line_ids:
if not line.invoice_id.account_id:
continue
round_curr = line.invoice_id.currency_id.round
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
if not line.on_margin:
taxes = line.invoice_line_tax_ids.compute_all(
price,
currency=line.invoice_id.currency_id,
quantity=line.quantity,
product=line.product_id,
partner=line.invoice_id.partner_id
)['taxes']
else:
cost_price = line.invoice_id.company_id.currency_id._convert(
line.cost_price,
line.invoice_id.currency_id,
line.invoice_id.company_id,
line.invoice_id.date_invoice or fields.Date.today()
)
taxes = line.invoice_line_tax_ids.compute_all(
price,
currency=line.invoice_id.currency_id,
quantity=line.quantity,
product=line.product_id,
partner=line.invoice_id.partner_id,
margin=(price * line.quantity) - cost_price
)['taxes']
for tax in taxes:
val = self._prepare_tax_line_vals(line, tax)
key = self.env['account.tax'].browse(tax['id']).get_grouping_key(val)
if key not in result:
result[key] = val
result[key]['base'] = round_curr(val['base'])
else:
result[key]['amount'] += val['amount']
result[key]['base'] += round_curr(val['base'])
return result
def _create_split_move_lines(self, ml, invoice_line):
"""
"""
tax_amount = 0.00
for tax in self.env['account.move.line'].resolve_2many_commands('tax_ids', ml.get('tax_ids')):
tax_amount += tax.get('amount')
split_ml1 = dict(ml) # Purchase price in company currency converted to invoice currency
split_ml2 = dict(ml) # Base to compute VAT from margin, also using the converted cost price
cost_price = self.company_id.currency_id._convert(
invoice_line.cost_price,
self.currency_id,
self.company_id,
self.date_invoice or fields.Date.today()
)
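        # Under the margin scheme, VAT is due only on the margin (sale price minus cost)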
margin = (invoice_line.price_unit * invoice_line.quantity) - cost_price
amount = margin - (margin / (1 + tax_amount / 100))
split_ml1['price_unit'] = split_ml1['price'] = cost_price
split_ml1['tax_ids'] = [(4, invoice_line.invoice_id.company_id.on_margin_sale_tax_id.id, None)]
split_ml2.update({
'price': margin - amount,
'price_unit': margin - amount
})
return [split_ml1, split_ml2]
@api.model
def invoice_line_move_line_get(self):
"""
"""
res = super(AccountInvoice, self).invoice_line_move_line_get()
if not self.on_margin:
return res
result = []
for ml in res:
invoice_line = self.env['account.invoice.line'].browse(ml.get('invl_id'))
if not invoice_line.on_margin:
result.append(ml)
continue
splited_mls = self._create_split_move_lines(ml, invoice_line)
result.extend(splited_mls)
return result
@api.multi
def action_invoice_open(self):
"""
"""
for invoice in self:
if not invoice.on_margin:
continue
if not invoice.company_id.on_margin_sale_tax_id:
raise UserError(_('The tax on margin has not been configured on the company that is issuing the invoice. Please go to "Company settings" and configure the tax on margin.'))
return super(AccountInvoice, self).action_invoice_open()
@api.multi
def action_move_create(self):
""" Creates invoice related analytics and financial move lines """
account_move = self.env['account.move']
for inv in self:
if not inv.journal_id.sequence_id:
raise UserError(_('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line_ids:
raise UserError(_('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = dict(self._context, lang=inv.partner_id.lang)
if not inv.date_invoice:
inv.with_context(ctx).write({'date_invoice': fields.Date.context_today(self)})
if not inv.date_due:
inv.with_context(ctx).write({'date_due': inv.date_invoice})
company_currency = inv.company_id.currency_id
# create move lines (one per invoice line + eventual taxes and analytic lines)
# iml = inv.invoice_line_move_line_get()
iml = []
for move_line in inv.invoice_line_move_line_get():
product = self.env['product.product'].browse(move_line.get('product_id'))
invl = self.env['account.invoice.line'].browse(move_line.get('invl_id'))
if any(invl.invoice_line_tax_ids.mapped('on_margin')):
margin = (invl.price_unit*invl.quantity) - invl.cost_price
split_move_line = dict(move_line)
split_move_line1 = dict(move_line)
tax_amount = 0.00
for tax in self.env['account.move.line'].resolve_2many_commands('tax_ids', split_move_line.get('tax_ids')):
tax_amount += tax.get('amount')
split_move_line['price'] = invl.cost_price
split_move_line['price_unit'] = invl.cost_price
split_move_line['tax_ids'] = [(4, self.env.ref('l10n_be.1_attn_VAT-OUT-00-L').id, None)]
iml.append(split_move_line)
split_move_line1['price'] = (margin / (1+(tax_amount/100)))
split_move_line1['price_unit'] = margin / (1+(tax_amount/100))
iml.append(split_move_line1)
else:
iml.append(move_line)
iml += inv.tax_line_move_line_get()
diff_currency = inv.currency_id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total, total_currency, iml = inv.with_context(ctx).compute_invoice_totals(company_currency, iml)
name = inv.name or '/'
if inv.payment_term_id:
totlines = \
inv.with_context(ctx).payment_term_id.with_context(currency_id=company_currency.id).compute(total,
inv.date_invoice)[
0]
res_amount_currency = total_currency
ctx['date'] = inv._get_currency_rate_date()
for i, t in enumerate(totlines):
if inv.currency_id != company_currency:
amount_currency = company_currency.with_context(ctx)._convert(t[1], inv.currency_id, inv.company_id, inv.date_invoice or fields.Date.today())
else:
amount_currency = False
# last line: add the diff
res_amount_currency -= amount_currency or 0
if i + 1 == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': inv.account_id.id,
'date_maturity': t[0],
'amount_currency': diff_currency and amount_currency,
'currency_id': diff_currency and inv.currency_id.id,
'invoice_id': inv.id
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': inv.account_id.id,
'date_maturity': inv.date_due,
'amount_currency': diff_currency and total_currency,
'currency_id': diff_currency and inv.currency_id.id,
'invoice_id': inv.id
})
part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
line = [(0, 0, self.line_get_convert(l, part.id)) for l in iml]
line = inv.group_lines(iml, line)
journal = inv.journal_id.with_context(ctx)
line = inv.finalize_invoice_move_lines(line)
date = inv.date or inv.date_invoice
move_vals = {
'ref': inv.reference,
'line_ids': line,
'journal_id': journal.id,
'date': date,
'narration': inv.comment,
}
ctx['company_id'] = inv.company_id.id
ctx['invoice'] = inv
ctx_nolang = ctx.copy()
ctx_nolang.pop('lang', None)
move = account_move.with_context(ctx_nolang).create(move_vals)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move.post()
# make the invoice point to that move
vals = {
'move_id': move.id,
'date': date,
'move_name': move.name,
}
inv.with_context(ctx).write(vals)
return True
class AccountInvoiceLine(models.Model):
_inherit = "account.invoice.line"
on_margin = fields.Boolean('Is on margin', compute='_compute_on_margin', store=True)
cost_price = fields.Float('Total Cost', compute='_compute_cost_price', store=True, oldname='real_cost')
compute_real_cost = fields.Float('Total Cost 1') # TODO: probably not used anymore
compute_cost_price = fields.Float('Total Cost 2') # TODO: probably not used anymore
invoice_type = fields.Selection(related='invoice_id.type')
@api.depends('invoice_line_tax_ids')
def _compute_on_margin(self):
for line in self:
line.on_margin = bool(line.invoice_line_tax_ids.filtered('on_margin'))
@api.multi
@api.depends('compute_cost_price', 'sale_line_ids.cost_price')
def _compute_cost_price(self):
for inv_line in self:
if not inv_line.compute_cost_price:
inv_line.cost_price = sum(inv_line.mapped('sale_line_ids.cost_price'))
else:
inv_line.cost_price = inv_line.compute_cost_price
def _compute_price_on_margin(self):
currency = self.invoice_id and self.invoice_id.currency_id or None
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
qty = self.quantity
cost_price = self.invoice_id.company_id.currency_id._convert(
self.cost_price,
self.invoice_id.currency_id,
currency,
self.invoice_id.date_invoice or fields.Date.today()
)
taxes = self.invoice_line_tax_ids.compute_all(
price,
currency=currency,
quantity=qty,
product=self.product_id,
partner=self.invoice_id.partner_id,
margin=(price * qty) - cost_price
)
self.price_subtotal = price_subtotal_signed = taxes['total_excluded']
self.price_total = taxes['total_included']
if self.invoice_id.currency_id and self.invoice_id.currency_id != self.invoice_id.company_id.currency_id:
currency = self.invoice_id.currency_id
date = self.invoice_id._get_currency_rate_date()
price_subtotal_signed = currency._convert(
price_subtotal_signed,
self.invoice_id.company_id.currency_id,
self.company_id or self.env.user.company_id,
date or fields.Date.today()
)
sign = self.invoice_id.type in ['in_refund', 'out_refund'] and -1 or 1
self.price_subtotal_signed = price_subtotal_signed * sign
@api.one
@api.depends('price_unit', 'discount', 'invoice_line_tax_ids', 'quantity',
'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id',
'invoice_id.company_id', 'invoice_id.date_invoice', 'invoice_id.date'
)
def _compute_price(self):
"""
"""
if self.filtered('invoice_line_tax_ids.on_margin'):
self._compute_price_on_margin()
else:
super(AccountInvoiceLine, self)._compute_price()
@api.onchange('product_id', 'invoice_line_tax_ids', 'quantity', 'price_unit', 'discount')
def _onchange_product_id_tax(self):
margin = (self.quantity * self.price_unit) - self.cost_price
if any(self.invoice_line_tax_ids.mapped('on_margin')) and margin <= 0.00:
self.invoice_line_tax_ids = [(6, 0, [self.env.ref('tax_margin.tax_margin_0').id])]
return {
'warning': {
'title': _('Tax'),
                'message': _('Warning: the tax on this line was changed because the line has no margin.'),
}
}
class AccountInvoiceTax(models.Model):
_inherit = "account.invoice.tax"
on_margin = fields.Boolean('Is on margin', oldname='is_on_margin')
| null |
tax_margin/models/account_invoice.py
|
account_invoice.py
|
py
| 16,586 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "odoo.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Boolean",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Binary",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "odoo.api.one",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "odoo.api.depends",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "odoo.api.depends",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "odoo.tools.misc.formatLang",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "odoo.fields.Date.today",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "odoo.api.multi",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date.today",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "odoo.exceptions.UserError",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "odoo.api.multi",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "odoo.exceptions.UserError",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "odoo.exceptions.UserError",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date.context_today",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date.today",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "odoo.api.multi",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "odoo.models.Model",
"line_number": 338,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Boolean",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Float",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Float",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Float",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "odoo.api.depends",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "odoo.api.multi",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "odoo.api.depends",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date.today",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date.today",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "odoo.fields",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "odoo.api.one",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "odoo.api.depends",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "odoo.api.onchange",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "odoo.models.Model",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Boolean",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 431,
"usage_type": "name"
}
] |
620613938
|
import cv2 as cv
import numpy as np
blank = np.zeros((400,400), dtype='uint8')
rectangle = cv.rectangle(blank.copy(),(30,30),(370,370),255,-1)
circle = cv.circle(blank.copy(),(200,200),200,255,-1)
cv.imshow('Rectangle',rectangle)
cv.imshow('Circle',circle)
# Bitwise AND keeps only the region where both masks overlap
bitwise_and = cv.bitwise_and(rectangle,circle)
cv.imshow('bitwise And',bitwise_and)
# Similarly, bitwise OR, XOR and NOT operations can be performed.
cv.waitKey(0)
| null |
bitwise.py
|
bitwise.py
|
py
| 433 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.zeros",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 18,
"usage_type": "call"
}
] |
138650590
|
#!/usr/bin/env python
import argparse
import json
import logging
import sys
import time
import requests
"""
Fetches events from Remora and ingests them into KairosDB.
"""
KAIROS_WRITE_PATH = "/api/v1/datapoints"
def sum_lag(consumer_group_name, consumer_group_data):
metrics = {}
for partition in consumer_group_data['partition_assignment']:
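        # set-subset test: only count partitions that report both a 'topic'
        # and a 'lag' key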
if {'topic', 'lag'} <= partition.keys():
metrics[partition['topic']] = metrics.get(
partition['topic'], 0) + partition.get('lag', 0)
results = []
for topic, lag in metrics.items():
results.append((consumer_group_name, topic, lag))
return results
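# A hedged sketch of the Remora consumer-group payload sum_lag expects;
# field names are inferred from the parsing above, not from Remora docs:
#
#   {"partition_assignment": [
#       {"topic": "orders", "partition": 0, "lag": 5},
#       {"topic": "orders", "partition": 1, "lag": 3}]}
#
# sum_lag("my-group", payload) then returns [("my-group", "orders", 8)].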
def push_data(kairosdb_api, consumer_group_lag):
for (consumer_group_name, topic, lag) in consumer_group_lag:
metric = {
"name": consumer_group_name,
"timestamp": int(round(time.time() * 1000)),
"value": lag,
"tags": {
"topic": topic
}
}
logging.info(
"pushing information for consumer group %s, topic %s with value %s" % (consumer_group_name, topic, lag))
kairosdb_url = "".join([kairosdb_api, KAIROS_WRITE_PATH])
res = requests.post(kairosdb_url, json.dumps(metric))
logging.debug(res.text)
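# A hedged sketch of a single datapoint as POSTed to KairosDB's
# /api/v1/datapoints endpoint (shape mirrors the dict built above;
# KairosDB timestamps are epoch milliseconds):
#
#   {"name": "my-group", "timestamp": 1700000000000,
#    "value": 8, "tags": {"topic": "orders"}}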
def ingest(remora_api, kairosdb_api):
logging.info("Fetching events from Remora")
consumer_groups_url = "".join([remora_api, "/consumers"])
consumer_groups = requests.get(consumer_groups_url).json()
logging.info("Got %s consumer groups" % (", ".join(consumer_groups)))
for consumer_group_name in consumer_groups:
logging.info("querying %s" % consumer_group_name)
consumer_group_data_url = "".join(
[remora_api, "/consumers/", consumer_group_name])
consumer_group_data = requests.get(consumer_group_data_url).json()
push_data(kairosdb_api, sum_lag(
consumer_group_name, consumer_group_data))
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(
description="Fetches events from Remora and ingests it into KairosDB.",
epilog="Example: remora-fetcher.py -r http://52.11.127.207:9000")
parser.add_argument("-k", action="store", dest="kairosdb_api", default="http://localhost:8080",
help="The URL of the KairosDB API, the default is `http://localhost:8080`")
parser.add_argument("-r", action="store", dest="remora_api", default="http://localhost:9000",
help="The URL of the Remora API, the default is `http://localhost:9000`")
parser.add_argument("-p", action="store", dest="poll_interval", default=10,
type=int, help="The poll interval in seconds, the default is 10")
parser.add_argument("-d", action="store_true", dest="enable_debug",
default=False, help="Enables debug messages")
args = parser.parse_args()
if args.enable_debug:
FORMAT = "%(asctime)-0s %(levelname)s %(message)s [at line %(lineno)d]"
logging.basicConfig(level=logging.DEBUG,
format=FORMAT, datefmt="%Y-%m-%dT%I:%M:%S")
else:
FORMAT = "%(asctime)-0s %(message)s"
logging.basicConfig(level=logging.INFO,
format=FORMAT, datefmt="%Y-%m-%dT%I:%M:%S")
logging.getLogger("requests").setLevel(logging.WARNING)
logging.debug("Arguments %s" % args)
while True:
ingest(args.remora_api, args.kairosdb_api)
time.sleep(args.poll_interval)
except Exception as e:
logging.error(e)
sys.exit(1)
| null |
remora-fetcher.py
|
remora-fetcher.py
|
py
| 3,728 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.time",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 107,
"usage_type": "call"
}
] |
71224660
|
import numpy as np
import astropy.units as u
from astropy import constants as const
from astropy.table import QTable
from glue.core.message import (SubsetCreateMessage,
SubsetDeleteMessage,
SubsetUpdateMessage)
from traitlets import Bool, Float, Int, List, Unicode, Dict, observe
from jdaviz.core.custom_traitlets import FloatHandleEmpty
from jdaviz.core.events import (AddDataMessage,
RemoveDataMessage,
AddLineListMessage,
SnackbarMessage,
RedshiftMessage)
from jdaviz.core.registries import tray_registry
from jdaviz.core.template_mixin import TemplateMixin
from jdaviz.core.linelists import load_preset_linelist
from jdaviz.core.marks import SpectralLine
from jdaviz.core.validunits import create_spectral_equivalencies_list
__all__ = ['LineListTool']
@tray_registry('g-line-list', label="Line Lists")
class LineListTool(TemplateMixin):
dialog = Bool(False).tag(sync=True)
template_file = __file__, "line_lists.vue"
rs_enabled = Bool(False).tag(sync=True) # disabled until lines are plotted
rs_slider = Float(0).tag(sync=True) # in units of delta-redshift
rs_slider_range_auto = Bool(True).tag(sync=True)
rs_slider_half_range = Float(0.1).tag(sync=True)
rs_slider_step_auto = Bool(True).tag(sync=True)
rs_slider_step = Float(0.01).tag(sync=True)
rs_redshift_step = Float(1).tag(sync=True)
rs_slider_ndigits = Int(1).tag(sync=True)
rs_redshift = FloatHandleEmpty(0).tag(sync=True)
rs_rv = FloatHandleEmpty(0).tag(sync=True)
rs_slider_throttle = Int(100).tag(sync=True)
dc_items = List([]).tag(sync=True)
available_lists = List([]).tag(sync=True)
loaded_lists = List([]).tag(sync=True)
list_contents = Dict({}).tag(sync=True)
custom_name = Unicode().tag(sync=True)
custom_rest = Unicode().tag(sync=True)
custom_unit_choices = List([]).tag(sync=True)
custom_unit = Unicode().tag(sync=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._viewer = self.app.get_viewer("spectrum-viewer")
self._spectrum1d = None
self.available_lists = self._viewer.available_linelists()
self.list_to_load = None
self.loaded_lists = ["Custom"]
self.list_contents = {"Custom": {"lines": [], "color": "#FF0000FF"}}
self.line_mark_dict = {}
self._units = {}
self._bounds = {}
self._global_redshift = 0
self._rs_disable_observe = False
self._rs_pause_tables = False
# Watch for messages from Specviz helper redshift functions
self.hub.subscribe(self, RedshiftMessage,
handler=self._parse_redshift_msg)
self.hub.subscribe(self, AddDataMessage,
handler=self._on_viewer_data_changed)
self.hub.subscribe(self, RemoveDataMessage,
handler=self._on_viewer_data_changed)
self.hub.subscribe(self, SubsetCreateMessage,
handler=lambda x: self._on_viewer_data_changed())
self.hub.subscribe(self, SubsetDeleteMessage,
handler=lambda x: self._on_viewer_data_changed())
self.hub.subscribe(self, SubsetUpdateMessage,
handler=lambda x: self._on_viewer_data_changed())
self.hub.subscribe(self, AddLineListMessage,
handler=self._list_from_notebook)
# if set to auto (default), update the slider range when zooming on the spectrum viewer
self._viewer.scales['x'].observe(self._auto_slider_range, names=['min', 'max'])
def _on_viewer_data_changed(self, msg=None):
"""
Callback method for when data is added or removed from a viewer, or
when a subset is created, deleted, or updated. This method receives
a glue message containing viewer information in the case of the former
set of events, and updates the units in which to display the lines.
Notes
-----
We do not attempt to parse any data at this point, as it can cause
visible lag in the application.
Parameters
----------
msg : `glue.core.Message`
The glue message passed to this callback method.
"""
self._viewer_id = self.app._viewer_item_by_reference(
'spectrum-viewer').get('id')
# Subsets are global and are not linked to specific viewer instances,
# so it's not required that we match any specific ids for that case.
# However, if the msg is not none, check to make sure that it's the
# viewer we care about and that the message contains the data label.
if msg is None or msg.viewer_id != self._viewer_id or msg.data is None:
return
label = msg.data.label
try:
viewer_data = self.app.get_data_from_viewer('spectrum-viewer').get(label)
except TypeError:
warn_message = SnackbarMessage("Line list plugin could not retrieve data from viewer",
sender=self, color="error")
self.hub.broadcast(warn_message)
return
# If no data is currently plotted, don't attempt to update
if viewer_data is None:
return
self._units["x"] = str(viewer_data.spectral_axis.unit)
self._units["y"] = str(viewer_data.flux.unit)
self._bounds["min"] = viewer_data.spectral_axis[0]
self._bounds["max"] = viewer_data.spectral_axis[-1]
# set redshift slider to redshift stored in Spectrum1D object
self.rs_redshift = (viewer_data.redshift.value
if hasattr(viewer_data.redshift, 'value')
else viewer_data.redshift)
self._auto_slider_range() # will also trigger _auto_slider_step
# set the choices (and default) for the units for new custom lines
self.custom_unit_choices = create_spectral_equivalencies_list(viewer_data)
self.custom_unit = str(viewer_data.spectral_axis.unit)
def _parse_redshift_msg(self, msg):
'''
Handle incoming redshift messages from the app hub. Generally these
will be created by Specviz helper methods.
'''
if msg.sender == self:
return
param = msg.param
if param == "rs_slider_range":
if msg.value == 'auto':
# observer will handle setting rs_slider_range
self.rs_slider_range_auto = True
else:
self.rs_slider_range_auto = False
self.rs_slider_half_range = float(msg.value)/2
elif param == "rs_slider_step":
if msg.value == 'auto':
# observer will handle setting rs_slider_step
self.rs_slider_step_auto = True
else:
self.rs_slider_step_auto = False
slider_step = float(msg.value)
if slider_step > self.rs_slider_half_range:
raise ValueError("step must be smaller than range/2")
self.rs_slider_step = slider_step
self.rs_redshift_step = self._redshift_to_velocity(slider_step)
elif param == "redshift":
# NOTE: this should trigger the observe to update rs_rv, line positions, and
# update self._global_redshift
self.rs_redshift = float(msg.value)
elif param == 'rv':
# NOTE: this should trigger the observe to update rs_redshift, line positions, and
# update self._global_redshift
self.rs_rv = float(msg.value)
else:
raise NotImplementedError(f"RedshiftMessage with param {param} not implemented.")
def _velocity_to_redshift(self, velocity):
"""
Convert a velocity to a relativistic redshift. Assumes km/s (float)
as input and returns float.
"""
# NOTE: if supporting non-km/s units in the future, try to leave
# the default case to avoid quantity math as below for efficiency
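# relativistic Doppler relation: 1 + z = sqrt((1 + beta) / (1 - beta)),
# with beta = v / c (velocity converted from km/s to m/s below)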
beta = velocity * 1000 / const.c.value
return np.sqrt((1 + beta) / (1 - beta)) - 1
def _redshift_to_velocity(self, redshift):
"""
Convert a relativistic redshift to a velocity. Returns
the velocity in km/s (float).
"""
zponesq = (1 + redshift) ** 2
# NOTE: if supporting non-km/s units in the future, try to leave
# the default case to avoid quantity math as below for efficiency
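# inverse relation: v = c * ((1 + z)**2 - 1) / ((1 + z)**2 + 1), then m/s -> km/s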
return const.c.value * (zponesq - 1) / (zponesq + 1) / 1000 # km/s
def _update_line_positions(self):
# update all lines, self._global_redshift, and emit message back to Specviz helper
z = u.Quantity(self.rs_redshift)
for mark in self.app.get_viewer('spectrum-viewer').figure.marks:
# update ALL to this redshift, if adding support for per-line redshift
# this logic will need to change to not affect ALL lines
if not isinstance(mark, SpectralLine):
continue
mark.redshift = z
@observe('rs_slider')
def _on_slider_updated(self, event):
if self._rs_disable_observe:
return
self._rs_pause_tables = True
# NOTE: _on_rs_redshift_updated will handle updating rs_rv
# NOTE: the input has a custom @input method in line_lists.vue to cast
# to float so that we can assume its a float here to minimize lag
# when interacting with the slider.
self.rs_redshift = np.round(self.rs_redshift + event['new'] - event['old'],
self.rs_slider_ndigits)
@observe('rs_redshift')
def _on_rs_redshift_updated(self, event):
if self._rs_disable_observe:
return
if not isinstance(event['new'], float):
# then blank or None or '.'
return
value = event['new']
# update _global_redshift so new lines, etc, will adopt this latest value
self._global_redshift = value
self._rs_disable_observe = True
self.rs_rv = self._redshift_to_velocity(value)
self._rs_disable_observe = False
self._update_line_positions()
if not self._rs_pause_tables:
# Send the redshift back to the Specviz helper (and also trigger
# self._update_global_redshift)
msg = RedshiftMessage("redshift", self.rs_redshift, sender=self)
self.app.hub.broadcast(msg)
def vue_unpause_tables(self, event=None):
# after losing focus, update any elements that were paused during changes
self._rs_pause_tables = False
self._rs_disable_observe = False
self._on_rs_redshift_updated({'new': self.rs_redshift})
@observe('rs_rv')
def _on_rs_rv_updated(self, event):
if self._rs_disable_observe:
return
if not isinstance(event['new'], float):
# then blank or None or '.'
return
value = event['new']
redshift = self._velocity_to_redshift(value)
# prevent the redshift update from propagating back to an update in the rv
self._rs_disable_observe = True
# we'll wait until the blur event (which will call vue_unpause_tables)
# to update the value in the MOS table
self._rs_pause_tables = True
self.rs_redshift = redshift
# but we do want to update the plotted lines
self._update_line_positions()
self._rs_disable_observe = False
def vue_slider_reset(self, event):
self._rs_disable_observe = True
self.rs_slider = 0.0
self._rs_disable_observe = False
self._rs_pause_tables = False
# the redshift value in the MOS table wasn't updating during the slide, so update it now
self.vue_unpause_tables()
def _auto_slider_range(self, event=None):
if not self.rs_slider_range_auto:
return
# if set to auto, default the range based on the limits of the spectrum plot
sv = self.app.get_viewer('spectrum-viewer')
x_min, x_max = sv.state.x_min, sv.state.x_max
x_mid = abs(x_max + x_min) / 2.
# we'll *estimate* the redshift range to shift the range of the viewer
# (for a line with a rest wavelength in the center of the viewer),
# by taking abs, this will work for wavelength or frequency units.
half_range = abs(x_max - x_min) / x_mid
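# worked example (hypothetical numbers): a viewer spanning 4000-8000
# Angstroms gives x_mid = 6000 and half_range = 4000 / 6000 ~= 0.67,
# i.e. the slider covers roughly +/-0.67 in redshift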
ndec = -np.log10(half_range)
if ndec > 0:
# round to at least 2 digits, or the first significant digit
ndec = np.max([2, int(np.ceil(ndec))])
else:
ndec = 1
half_range = np.round(half_range, ndec)
# this will trigger self._auto_slider_step to set self.rs_slider_step and
# self.rs_redshift_step, if applicable
self.rs_slider_half_range = half_range
@observe('rs_slider_range_auto')
def _on_rs_slider_range_auto_updated(self, event):
if event['new']:
self._auto_slider_range()
@observe('rs_slider_half_range')
def _auto_slider_step(self, event=None):
if not self.rs_slider_step_auto:
return
# if set to auto, default to 1000 steps in the range
self.rs_slider_step = self.rs_slider_half_range * 2 / 1000
self.rs_redshift_step = abs(self._redshift_to_velocity(self._global_redshift+self.rs_slider_step) - self.rs_rv) # noqa
@observe('rs_slider_step')
def _on_rs_slider_step_updated(self, event):
# When using the slider, we'll "round" redshift to the digits in the
# slider step to avoid extra digits due to rounding errors
ndec = -np.log10(event['new'])
if ndec > 0:
# round to at least 2 digits, or one past the first significant digit
# note: the UI will not show trailing zeros, we just want to avoid
# a stray 1 at floating point precision if not significant
ndec = np.max([2, np.ceil(ndec)+1])
else:
ndec = 1
self.rs_slider_ndigits = int(ndec)
@observe('rs_slider_step_auto')
def _on_rs_slider_step_auto_updated(self, event):
if event['new']:
self._auto_slider_step()
def _update_global_redshift(self, msg):
'''Handle updates to the Specviz redshift slider, to apply to lines'''
if msg.param == "redshift":
self._global_redshift = msg.value
def _list_from_notebook(self, msg):
"""
Callback method for when a spectral line list is added to the specviz
instance from the notebook.
Parameters
----------
msg : `glue.core.Message`
The glue message passed to this callback method. Includes the line
data added in msg.table.
"""
loaded_lists = self.loaded_lists
list_contents = self.list_contents
tmp_names_rest = []
for row in msg.table:
if row["listname"] not in loaded_lists:
loaded_lists.append(row["listname"])
if row["listname"] not in list_contents:
list_contents[row["listname"]] = {"lines": [], "color": "#FF0000FF"}
temp_dict = {"linename": row["linename"],
"rest": row["rest"].value,
"unit": str(row["rest"].unit),
"colors": row["colors"] if "colors" in row else "#FF0000FF",
"show": row["show"],
"name_rest": row["name_rest"],
"redshift": row["redshift"] if "redshift" in row else
self._global_redshift}
list_contents[row["listname"]]["lines"].append(temp_dict)
tmp_names_rest.append(row["name_rest"])
self.loaded_lists = []
self.loaded_lists = loaded_lists
self.list_contents = {}
self.list_contents = list_contents
self._viewer.plot_spectral_lines(tmp_names_rest)
self.update_line_mark_dict()
msg_text = ("Spectral lines loaded from notebook. Lines can be hidden"
"/shown in the Line Lists plugin")
lines_loaded_message = SnackbarMessage(msg_text, sender=self,
color="success", timeout=15000)
self.hub.broadcast(lines_loaded_message)
def update_line_mark_dict(self):
self.line_mark_dict = {}
for m in self._viewer.figure.marks:
if isinstance(m, SpectralLine):
self.line_mark_dict[m.table_index] = m
n_lines_shown = len(self.line_mark_dict)
# redshift controls are enabled if any lines are currently plotted
self.rs_enabled = n_lines_shown > 0
if n_lines_shown > 0:
# with a lot of lines, a quick slider move will lag. Let's scale the
# timeout based on the number of lines, roughly between 50-500 ms
throttle = n_lines_shown * 5
if throttle < 50:
throttle = 50
if throttle > 500:
throttle = 500
self.rs_slider_throttle = throttle
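# e.g. 30 plotted lines -> 150 ms; 5 lines clamps up to the 50 ms floor
# and 200 lines clamps down to the 500 ms ceiling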
def vue_list_selected(self, event):
"""
Handle list selection from presets dropdown selector
"""
self.list_to_load = event
def vue_load_list(self, event):
"""
Load one of the preset line lists, storing its info in a
vuetify-friendly manner in addition to loading the astropy table into
the viewer's spectral_lines attribute.
"""
# Don't need to reload an already loaded list
if self.list_to_load in self.loaded_lists:
return
temp_table = load_preset_linelist(self.list_to_load)
# Also store basic list contents in a form that vuetify can handle
# Adds line style parameters that can be changed on the front end
temp_table["colors"] = "#FF0000FF"
# Load the table into the main astropy table and get it back, to make
# sure all values match between the main table and local plugin
temp_table = self._viewer.load_line_list(temp_table, return_table=True,
show=False)
line_list_dict = {"lines": [], "color": "#FF000080"}
# extra_fields = [x for x in temp_table.colnames if x not in
# ("linename", "rest", "name_rest")]
for row in temp_table:
temp_dict = {"linename": row["linename"],
"rest": row["rest"].value,
"unit": str(row["rest"].unit),
"colors": row["colors"],
"show": False,
"name_rest": str(row["name_rest"]),
"redshift": self._global_redshift}
# for field in extra_fields:
# temp_dict[field] = row[field]
line_list_dict["lines"].append(temp_dict)
list_contents = self.list_contents
list_contents[self.list_to_load] = line_list_dict
self.list_contents = {}
self.list_contents = list_contents
loaded_lists = self.loaded_lists + [self.list_to_load]
self.loaded_lists = []
self.loaded_lists = loaded_lists
self._viewer.plot_spectral_lines()
self.update_line_mark_dict()
msg_text = ("Spectral lines loaded from preset. Lines can be shown/hidden"
f" in the {self.list_to_load} dropdown in the Line Lists plugin")
lines_loaded_message = SnackbarMessage(msg_text, sender=self,
color="success", timeout=15000)
self.hub.broadcast(lines_loaded_message)
def vue_add_custom_line(self, event):
"""
Add a line to the "Custom" line list from UI input
"""
list_contents = self.list_contents
temp_dict = {"linename": self.custom_name,
"rest": float(self.custom_rest),
"unit": self.custom_unit,
"colors": list_contents["Custom"]["color"],
"show": True,
"redshift": self._global_redshift
}
# Add to viewer astropy table
with u.set_enabled_equivalencies(u.spectral()):
temp_table = QTable()
temp_table["linename"] = [temp_dict["linename"]]
temp_table["rest"] = [temp_dict["rest"]*u.Unit(temp_dict["unit"])]
temp_table["colors"] = [temp_dict["colors"]]
temp_table["redshift"] = [temp_dict["redshift"]]
temp_table = self._viewer.load_line_list(temp_table, return_table=True)
# Add line to Custom lines in local list
temp_dict["name_rest"] = str(temp_table[0]["name_rest"])
list_contents["Custom"]["lines"].append(temp_dict)
self.list_contents = {}
self.list_contents = list_contents
self._viewer.plot_spectral_line(temp_dict["name_rest"])
self.update_line_mark_dict()
lines_loaded_message = SnackbarMessage("Custom spectral line loaded",
sender=self, color="success")
self.hub.broadcast(lines_loaded_message)
def vue_show_all_in_list(self, listname):
"""
Toggle all lines in list to be visible
"""
lc = self.list_contents
for line in lc[listname]["lines"]:
line["show"] = True
self._viewer.spectral_lines.loc[line["name_rest"]]["show"] = True
# Trick traitlets into updating
self.list_contents = {}
self.list_contents = lc
self._viewer.plot_spectral_lines()
self.update_line_mark_dict()
def vue_hide_all_in_list(self, listname):
"""
Toggle all lines in list to be hidden
"""
lc = self.list_contents
name_rests = []
for line in lc[listname]["lines"]:
line["show"] = False
name_rests.append(line["name_rest"])
# Trick traitlets into updating
self.list_contents = {}
self.list_contents = lc
self._viewer.erase_spectral_lines(name_rest=name_rests)
self.update_line_mark_dict()
def vue_plot_all_lines(self, event):
"""
Plot all the currently loaded lines in the viewer
"""
if self._viewer.spectral_lines is None:
warn_message = SnackbarMessage("No spectral lines loaded to plot",
sender=self, color="error")
self.hub.broadcast(warn_message)
return
lc = self.list_contents
for listname in lc:
for line in lc[listname]["lines"]:
line["show"] = True
self._viewer.spectral_lines["show"] = True
# Trick traitlets into updating
self.list_contents = {}
self.list_contents = lc
self._viewer.plot_spectral_lines()
self.update_line_mark_dict()
def vue_erase_all_lines(self, event):
"""
Erase all lines from the viewer
"""
if self._viewer.spectral_lines is None:
warn_message = SnackbarMessage("No spectral lines to erase",
sender=self, color="error")
self.hub.broadcast(warn_message)
return
lc = self.list_contents
for listname in lc:
for line in lc[listname]["lines"]:
line["show"] = False
# Trick traitlets into updating
self.list_contents = {}
self.list_contents = lc
self._viewer.erase_spectral_lines()
self.update_line_mark_dict()
def vue_change_visible(self, line):
"""
Plot or erase a single line as needed when "Visible" checkbox is changed
"""
name_rest = line["name_rest"]
if line["show"]:
self._viewer.plot_spectral_line(name_rest)
else:
self._viewer.erase_spectral_lines(name_rest=name_rest)
self.update_line_mark_dict()
def vue_set_color(self, data):
"""
Change the color either of all members of a line list, or of an
individual line.
"""
color = data['color']
if "listname" in data:
listname = data["listname"]
lc = self.list_contents[listname]
lc["color"] = color
for line in lc["lines"]:
line["colors"] = color
# Update the astropy table entry
name_rest = line["name_rest"]
self._viewer.spectral_lines.loc[name_rest]["colors"] = color
# Update the color on the plot
if name_rest in self.line_mark_dict:
self.line_mark_dict[name_rest].colors = [color]
elif "linename" in data:
pass
def vue_remove_list(self, listname):
"""
Method to remove line list from available expansion panels when the x
on the panel header is clicked. Also removes line marks from plot and
updates the "show" value in the astropy table to False.
"""
lc = self.list_contents[listname]
name_rests = []
for line in lc["lines"]:
name_rests.append(self.vue_remove_line(line, erase=False))
self._viewer.erase_spectral_lines(name_rest=name_rests)
self.update_line_mark_dict()
self.loaded_lists = [x for x in self.loaded_lists if x != listname]
self.list_contents = {k: v for k, v in self.list_contents.items() if k != listname}
row_inds = [i for i, ln in
enumerate(self._viewer.spectral_lines['listname'])
if ln == listname]
self._viewer.spectral_lines.remove_rows(row_inds)
def vue_remove_line(self, line, erase=True):
"""
Method to remove a line from the plot when the line is deselected in
the expansion panel content. Input must have "linename" and "rest"
values for indexing on the astropy table.
"""
name_rest = line["name_rest"]
# Keep in our spectral line astropy table, but set it to not show on plot
self._viewer.spectral_lines.loc[name_rest]["show"] = False
# Remove the line from the plot marks
if erase:
try:
self._viewer.erase_spectral_lines(name_rest=name_rest)
del self.line_mark_dict[name_rest]
except KeyError:
raise KeyError("line marks: {}".format(self._viewer.figure.marks))
else:
return name_rest
| null |
jdaviz/configs/default/plugins/line_lists/line_lists.py
|
line_lists.py
|
py
| 26,894 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "jdaviz.core.template_mixin.TemplateMixin",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "traitlets.Bool",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "traitlets.Bool",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "traitlets.Float",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "traitlets.Bool",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "traitlets.Float",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "traitlets.Bool",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "traitlets.Float",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "traitlets.Float",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "traitlets.Int",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.custom_traitlets.FloatHandleEmpty",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.custom_traitlets.FloatHandleEmpty",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "traitlets.Int",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "traitlets.List",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "traitlets.List",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "traitlets.List",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "traitlets.Dict",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "traitlets.Unicode",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "traitlets.Unicode",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "traitlets.List",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "traitlets.Unicode",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.events.RedshiftMessage",
"line_number": 68,
"usage_type": "argument"
},
{
"api_name": "jdaviz.core.events.AddDataMessage",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "jdaviz.core.events.RemoveDataMessage",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "glue.core.message.SubsetCreateMessage",
"line_number": 77,
"usage_type": "argument"
},
{
"api_name": "glue.core.message.SubsetDeleteMessage",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "glue.core.message.SubsetUpdateMessage",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "jdaviz.core.events.AddLineListMessage",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "jdaviz.core.events.SnackbarMessage",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.validunits.create_spectral_equivalencies_list",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "astropy.constants.c",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "astropy.constants.c",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "astropy.units.Quantity",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "astropy.units",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "jdaviz.core.marks.SpectralLine",
"line_number": 214,
"usage_type": "argument"
},
{
"api_name": "numpy.round",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.events.RedshiftMessage",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "traitlets.observe",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.events.SnackbarMessage",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.marks.SpectralLine",
"line_number": 403,
"usage_type": "argument"
},
{
"api_name": "jdaviz.core.linelists.load_preset_linelist",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.events.SnackbarMessage",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "astropy.units.set_enabled_equivalencies",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "astropy.units",
"line_number": 496,
"usage_type": "name"
},
{
"api_name": "astropy.units.spectral",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "astropy.table.QTable",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "astropy.units.Unit",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "astropy.units",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "jdaviz.core.events.SnackbarMessage",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.events.SnackbarMessage",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.events.SnackbarMessage",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "jdaviz.core.registries.tray_registry",
"line_number": 25,
"usage_type": "call"
}
] |
540167651
|
import argparse
import contextlib
import json
import logging
import os
import shutil
import subprocess
import unicodedata
import fs
from typing import (
Dict,
IO,
Iterator,
Tuple,
)
EXTENSIONS = (
'.flac',
'.m4a',
'.mp3',
'.mp4',
'.opus',
'.wma',
)
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('destination')
parser.add_argument('--cleanup', action='store_true')
parser.add_argument('--loglevel', default=logging.INFO)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
logging.getLogger('paramiko.transport').setLevel(logging.ERROR)
myfs = fs.open_fs(args.source)
#
# Scan the destination instead of checking each artist/album later with
# isdir - that's slow when the source contains a lot of music.
#
destdirs = set()
try:
for top in os.scandir(args.destination):
if top.is_dir():
for bottom in os.scandir(top.path):
if bottom.is_dir():
#
# Some devices do weird things to Unicode pathnames.
#
relpath = os.path.join(top.name, bottom.name)
destdirs.add(relpath)
except FileNotFoundError:
#
# If the destination does not exist at all.
#
pass
def getsize(dirpath: str) -> int:
#
# ignores subdirectories entirely
#
mb = 0
for f in myfs.scandir(dirpath, namespaces=['details']):
if not f.is_dir:
mb += f.size // 1024**2
return mb
def scan(source: str) -> Iterator[Tuple[str, int]]:
for topdir in sorted(myfs.scandir('.'), key=lambda de: de.name):
if not topdir.is_dir:
continue
for bottomdir in sorted(myfs.scandir(topdir.name), key=lambda de: de.name):
if not bottomdir.is_dir:
continue
relpath = os.path.join(topdir.name, bottomdir.name)
yield relpath, getsize(relpath)
def print_buffer(cache: Dict, destination: str) -> Iterator[str]:
normdestdirs = {unicodedata.normalize('NFC', d) for d in destdirs}
for x in cache['subdirs']:
exists_in_dest = unicodedata.normalize('NFC', x['relpath']) in normdestdirs
if args.cleanup and not exists_in_dest:
continue
bottomdir_check = 'x' if exists_in_dest else ' '
print('[%s] (% 6d MB) %s' % (bottomdir_check, x['sizemb'], x['relpath']))
yield x['relpath']
def read_buffer(fin: IO[str]) -> Iterator[str]:
for i, line in enumerate(fin):
if not (line.startswith('[ ]') or line.startswith('[x]')):
raise ValueError('badness on line %d: %r' % (i, line))
yield line
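# A hedged sketch of the editor buffer read_buffer expects, as written by
# print_buffer above via '[%s] (% 6d MB) %s'; the paths are illustrative:
#
#   [x] (   512 MB) Some Artist/Some Album
#   [ ] (   128 MB) Other Artist/Other Album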
def add(source_dir, dest_dir, to_add):
for relpath in to_add:
dest_path = os.path.join(args.destination, relpath)
os.makedirs(dest_path, exist_ok=True)
try:
contents = myfs.listdir(relpath)
except Exception as err:
logging.error('unable to copy: %r err: %r', relpath, err)
continue
for f in sorted(contents):
name, ext = os.path.splitext(f)
if f.startswith('._') or ext.lower() not in EXTENSIONS:
continue
copyfrom = os.path.join(relpath, f)
copyto = os.path.join(dest_path, f)
logging.info('cp %r %r', copyfrom, copyto)
with open(copyto, 'wb') as fout:
myfs.download(copyfrom, fout)
def remove(root_path, to_remove):
for relpath in to_remove:
dest_path = os.path.join(root_path, relpath)
assert dest_path.startswith(args.destination)
print('rm -rf %r' % dest_path)
shutil.rmtree(dest_path)
parent_path = os.path.dirname(dest_path)
assert parent_path.startswith(args.destination)
if not os.listdir(parent_path):
os.rmdir(parent_path)
print('rm -rf %r' % parent_path)
cache_name = 'ghettosync.json'
try:
with open(cache_name) as fin:
cache = json.load(fin)
except FileNotFoundError:
cache = {}
if cache.get('source') != args.source:
#
# Cache is invalid, repopulate.
#
cache = {
'source': args.source,
'subdirs': [{'relpath': r, 'sizemb': s} for (r, s) in scan(args.source)],
}
with open(cache_name, 'w') as fout:
json.dump(cache, fout, sort_keys=True, ensure_ascii=False)
#
# TODO: use tempfile here
#
fname = 'ghettosync.tmp'
with open(fname, 'w') as fout:
with contextlib.redirect_stdout(fout):
subdirs = list(print_buffer(cache, args.destination))
subprocess.check_call([os.environ.get('EDITOR', 'vim'), fname])
with open(fname, 'r') as fin:
lines = list(read_buffer(fin))
assert len(lines) == len(subdirs), 'number of lines has changed'
to_remove = []
to_add = []
for line, relpath in zip(lines, subdirs):
source_path = os.path.join(args.source, relpath)
dest_path = os.path.join(args.destination, relpath)
dest_exists = os.path.isdir(dest_path)
if relpath in destdirs and line.startswith('[ ]'):
to_remove.append(relpath)
elif not dest_exists and line.startswith('[x]'):
to_add.append(relpath)
#
# TODO:
#
# - check for enough disk space (net transfer)
# - sort top-level directory after adding files
# - rollback after incomplete directory copy
# - parallelize copying
#
if to_remove:
print('The following subdirectories will be removed:')
for x in to_remove:
print(x)
print('Is this OK? yes / [no]')
if input().lower() in ('y', 'yes'):
remove(args.destination, to_remove)
add(args.source, args.destination, to_add)
| null |
ghettosync.py
|
ghettosync.py
|
py
| 5,652 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "fs.open_fs",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterator",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "unicodedata.normalize",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "unicodedata.normalize",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "typing.Iterator",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "typing.IO",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.rmdir",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "contextlib.redirect_stdout",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "subprocess.check_call",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 182,
"usage_type": "attribute"
}
] |