seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
346578686
|
#!/usr/bin/python
import pycurl
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from bs4 import BeautifulSoup
URL = "https://bitcointalk.org/index.php?board=159.0"
def get_method(curl, url):
buffer = StringIO()
curl.setopt(curl.URL,url)
curl.setopt(curl.WRITEDATA, buffer)
curl.setopt(curl.USERAGENT, "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0")
curl.perform()
curl.close()
body = buffer.getvalue()
return body
c = pycurl.Curl()
content = get_method(c, URL)
soup = BeautifulSoup(content, 'html.parser')
anchors = soup.findAll('a')
anchrList = []
for anchr in anchors:
try:
if 'topic' in anchr['href'] and anchr['href'].endswith('0'):
anchrList.append(anchr['href'])
    except KeyError:
        # some anchors have no href attribute; skip them
        pass
for anchor in anchrList:
c = pycurl.Curl()
content = get_method(c, anchor)
if 'mining' in content:
print(anchor)
| null |
findMineCoins.py
|
findMineCoins.py
|
py
| 1,070 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "io.StringIO",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pycurl.Curl",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pycurl.Curl",
"line_number": 47,
"usage_type": "call"
}
] |
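A note on the sample above: on Python 3, pycurl writes raw bytes to WRITEDATA, so the io.StringIO fallback fails at curl.perform(). A minimal sketch of a Python-3-safe variant using io.BytesIO with an explicit decode (the User-Agent string is carried over from the sample):

```python
import pycurl
from io import BytesIO

def get_method_py3(curl, url):
    # pycurl delivers the response body as bytes, so the buffer must accept bytes
    buffer = BytesIO()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.WRITEDATA, buffer)
    curl.setopt(curl.USERAGENT, "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0")
    curl.perform()
    curl.close()
    # decode explicitly; 'replace' avoids crashes on odd bytes in forum pages
    return buffer.getvalue().decode('utf-8', errors='replace')

content = get_method_py3(pycurl.Curl(), "https://bitcointalk.org/index.php?board=159.0")
```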
32315326
|
import sys
from PyQt4 import QtGui, QtCore, QtWebKit
import folium
import io_geojson
import tweet
import point
import random
class Example(QtGui.QMainWindow):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
#textEdit = QtGui.QTextEdit()
#self.setCentralWidget(textEdit)
map_osm = folium.Map(location=[33.59359997467155, -111.94546800838894])
map_osm.save(r"./map.html")
exitAction = QtGui.QAction(QtGui.QIcon('exit24.png'), 'Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
openFile = QtGui.QAction(QtGui.QIcon('open.png'), 'Open', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open new File')
openFile.triggered.connect(self.showDialog)
self.webView = QtWebKit.QWebView()
self.webView.setHtml(open(r"./map.html").read())
self.setCentralWidget(self.webView)
self.statusBar()
menubar = self.menuBar()
menubar.setNativeMenuBar(False)
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openFile)
fileMenu.addAction(exitAction)
toolbar = self.addToolBar('Open')
toolbar.addAction(openFile)
toolbar = self.addToolBar('Exit')
toolbar.addAction(exitAction)
self.setGeometry(300, 300, 600, 600)
self.setWindowTitle('Assignment 10')
self.show()
self.webView.show()
def showDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home')
if fname == '':
return
tweets=io_geojson.read_tweets(fname)
tweets_data=[]
for i in tweets:
tweets_data.append(tweet.Tweet(i))
        if not tweets_data:
            return  # nothing to plot; avoids a ZeroDivisionError below
        average_lat = 0
        average_lon = 0
        count_tweets = 0
random.seed(1234)
for i in tweets_data:
lat, lon = i.gen_point_in_bounds()
average_lat += lat
average_lon += lon
count_tweets += 1
average_lon /= len(tweets_data)
average_lat /= len(tweets_data)
map_1 = folium.Map(location=[average_lat, average_lon])
countttz = 0
for i in tweets_data:
lat, lon = i.gen_point_in_bounds()
if countttz < 400:
#print(i.username)
folium.Marker([lat, lon], popup = "Test").add_to(map_1)
countttz+=1
map_1.save(r"./map.html")
self.webView.setHtml(open("./map.html").read())
self.webView.show()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| null |
view.py
|
view.py
|
py
| 3,005 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PyQt4.QtGui.QMainWindow",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "folium.Map",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QAction",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QIcon",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QAction",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QIcon",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtWebKit.QWebView",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtWebKit",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QFileDialog.getOpenFileName",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QFileDialog",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "io_geojson.read_tweets",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "tweet.Tweet",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "folium.Map",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "folium.Marker",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QApplication",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 105,
"usage_type": "call"
}
] |
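The showDialog method above centers a folium map on the mean of the generated tweet coordinates before adding markers. A minimal standalone sketch of that map-centering pattern (the coordinates are made up for illustration; no PyQt or tweet parsing involved):

```python
import folium

points = [(33.59, -111.94), (33.60, -111.95), (33.58, -111.93)]  # illustrative coordinates
avg_lat = sum(lat for lat, _ in points) / len(points)
avg_lon = sum(lon for _, lon in points) / len(points)

m = folium.Map(location=[avg_lat, avg_lon])
for lat, lon in points:
    folium.Marker([lat, lon], popup="Test").add_to(m)
m.save("./map.html")  # the sample then loads this file into a QWebView
```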
324148864
|
# -*- coding:utf-8 -*-
import requests
import re
res = requests.get('https://www.google.co.jp/search?q=%E6%B2%96%E7%B8%84%E3%80%80%E9%AB%98%E7%B4%9A%E3%83%9B%E3%83%86%E3%83%AB')
res.raise_for_status()
#print(res.text)
pattTitle = re.compile('<h3 class="r"><a href="/url(.*?)">(.*?)</a></h3><div class="s">')
pattUrl = re.compile('<cite>(.*?)</cite>')
matchTitle = pattTitle.findall(res.text)
matchUrl = pattUrl.findall(res.text)
for i in range(min(10, len(matchTitle), len(matchUrl))):  # don't assume ten results came back
rettxtTitle = matchTitle[i]
rettxtUrl = matchUrl[i]
text = rettxtTitle[1].replace("<b>", "")
text = text.replace("</b>", "")
print("<<<" + text + ">>>")
print(rettxtUrl)
print("---------------")
| null |
premiere02.py
|
premiere02.py
|
py
| 673 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
}
] |
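An offline sketch of the regex extraction above, run against a static snippet shaped like the old Google results markup the sample targets (the snippet text is made up for illustration):

```python
import re

html = '<h3 class="r"><a href="/url?q=x">Okinawa <b>Hotel</b></a></h3><div class="s"><cite>example.com</cite>'
pattTitle = re.compile('<h3 class="r"><a href="/url(.*?)">(.*?)</a></h3><div class="s">')
pattUrl = re.compile('<cite>(.*?)</cite>')
print(pattTitle.findall(html))  # [('?q=x', 'Okinawa <b>Hotel</b>')]
print(pattUrl.findall(html))    # ['example.com']
```

Note that Google's current markup no longer matches these patterns, so the live request in the sample is likely to return empty match lists today.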
116305201
|
# Python standard.
import random
from typing import List
# Third-party.
import numpy as np
# Local.
from scratch.activations import Activations
class Perceptron:
""" Single layer Neural Network. Linear classifier (binary). For Linearly separable problems."""
def __init__(self, inputs: List[float], activation_fn_name: str, learning_rate: float):
# Random initialization for weights so different hidden units learn different things.
self.bias = 1
self.inputs = inputs + [self.bias]
        self.weights = np.array([random.random() for _ in range(len(self.inputs))])  # one weight per input, including the bias term
self.activation_fn_name = activation_fn_name
self.learning_rate = learning_rate
@staticmethod
def sign(x: float):
return 1 if x >= 0 else 0
def feed_forward(self):
""" Z(l) = W(l)X(l) + B(l). Linear forward function. Bias is added on creation of Perceptron object. """
sum_ = 0
for i in range(len(self.weights)):
sum_ += self.inputs[i] * self.weights[i]
return getattr(Activations(sum_), self.activation_fn_name)()
def feed_forward_and_adjust_weights(self, target: float):
""" delta weight = error * input * learning rate. """
estimate = self.feed_forward()
error = target - estimate
for i in range(len(self.weights)):
self.weights[i] += error * self.inputs[i] * self.learning_rate
if __name__ == '__main__':
perceptron = Perceptron(
inputs=[-1, 0.5, 0.25, -0.75, 0.99, 0.01],
activation_fn_name='sigmoid',
learning_rate=0.01,
)
print(perceptron.feed_forward())
| null |
scratch/perceptron.py
|
perceptron.py
|
py
| 1,642 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scratch.activations.Activations",
"line_number": 33,
"usage_type": "call"
}
] |
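The Perceptron sample trains with the delta rule described in its docstring (delta weight = error * input * learning rate). A self-contained sketch of the same update on an AND gate, with a plain sigmoid standing in for the scratch.activations dependency, which is not shown above:

```python
import math

def sigmoid(z: float) -> float:
    return 1.0 / (1.0 + math.exp(-z))

weights = [0.0, 0.0, 0.0]  # two input weights plus a bias weight
lr = 0.1

def predict(x):
    z = sum(w * xi for w, xi in zip(weights, x + [1.0]))  # bias input fixed at 1
    return sigmoid(z)

# AND-gate truth table as (inputs, target) pairs
data = [([0.0, 0.0], 0.0), ([0.0, 1.0], 0.0), ([1.0, 0.0], 0.0), ([1.0, 1.0], 1.0)]
for _ in range(5000):  # repeat the delta-rule update until the outputs separate
    for x, target in data:
        error = target - predict(x)
        for i, xi in enumerate(x + [1.0]):
            weights[i] += error * xi * lr

print([round(predict(x), 2) for x, _ in data])  # approaches [0, 0, 0, 1]
```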
168914108
|
import yfinance as yf
def get_yahoo_stats_for_candidate(s, notification_callback):
# notification_callback.emit("Downloading the data for: "+s)
df=yf.download(s, period = "1y")
# notification_callback.emit("Figuring average Drop and Change for: "+ s)
df['drop']=df['Open']-df['Low']
df['dropP']=df['drop']/df['Open']*100
df['diffD']=df['Low']-df['High']
df['diffD']=df['diffD'].abs()
df['diffP']=df['diffD']/df['Open']*100
avdropP=df["dropP"].mean()
avChange=df["diffP"].mean()
return avdropP,avChange
| null |
Research/UpdateCandidates.py
|
UpdateCandidates.py
|
py
| 552 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "yfinance.download",
"line_number": 9,
"usage_type": "call"
}
] |
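Usage sketch for the helper above (requires the yfinance package and network access; "AAPL" is an arbitrary example ticker, and the unused notification_callback is passed as None):

```python
avdropP, avChange = get_yahoo_stats_for_candidate("AAPL", notification_callback=None)
print("avg open-to-low drop: %.2f%%, avg daily range: %.2f%%" % (avdropP, avChange))
```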
397024757
|
from django.urls import path
from . import views
urlpatterns = [
path('register/', views.registerPage, name="register"),
path('', views.index, name='index'),
path("update/<int:pk>/", views.update_task, name="update_task"),
path('<int:pk>', views.delete_task, name="delete_task"),
]
| null |
todolist/urls.py
|
urls.py
|
py
| 312 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
}
] |
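The urlconf above expects four view callables, two of them taking a pk keyword argument from the <int:pk> converter. A hypothetical views.py stub with matching signatures (response bodies are placeholders, not from the original project):

```python
from django.http import HttpResponse

def registerPage(request):
    return HttpResponse("register")

def index(request):
    return HttpResponse("index")

def update_task(request, pk):  # pk arrives as an int via the <int:pk> converter
    return HttpResponse("update task %d" % pk)

def delete_task(request, pk):
    return HttpResponse("delete task %d" % pk)
```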
128730014
|
# -*- coding: utf-8 -*-
"""
networking.py
~~~~~~~~~~~~
This module implements Settings HP OneView REST API
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
__title__ = 'networking'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2015) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from hpOneView.common import *
from hpOneView.connection import *
from hpOneView.activity import *
from hpOneView.exceptions import *
class networking(object):
def __init__(self, con):
self._con = con
self._activity = activity(con)
###########################################################################
# Logical Interconnect Group
###########################################################################
    def update_settings_from_default(self, settings={}):
        if not settings:
            settings = make_enet_settings('__NoName__')
        # The original left a '%s' placeholder unfilled and returned early,
        # making the copy loop unreachable; assuming the LIG defaults endpoint here.
        default = self._con.get(uri['lig'] + '/defaultSettings')
        for key in list(settings.keys()):
            if key != 'name':
                settings[key] = default[key]
        return settings
def create_lig(self, lig, blocking=True, verbose=False):
task, body = self._con.post(uri['lig'], lig)
task, entity = self._activity.make_task_entity_tuple(task)
if blocking is True:
task = self._activity.wait4task(task, verbose=verbose)
return entity
def update_lig(self, lig, blocking=True, verbose=False):
task, body = self._con.put(lig['uri'], lig)
if blocking is True:
task = self._activity.wait4task(task, verbose=verbose)
return task
def delete_lig(self, lig, blocking=True, verbose=False):
task, body = self._con.delete(lig['uri'])
if blocking is True:
task = self._activity.wait4task(task, verbose=verbose)
return task
def get_ligs(self):
return get_members(self._con.get(uri['lig']))
def get_lig_by_name(self, ligname):
return self._con.get_entity_byfield(uri['lig'], 'name', ligname)
def get_interconnect_types(self):
# get all the supported interconnect types
resp = get_members(self._con.get(uri['ictype']))
return resp
def get_lis(self):
return get_members(self._con.get(uri['li']))
###########################################################################
# Connection Templates
###########################################################################
def get_connection_templates(self):
return get_members(self._con.get(uri['ct']))
def update_net_ctvalues(self, xnet, bw={}):
if not bw:
return
if not xnet:
raise HPOneViewInvalidResource('Missing Network')
defaultCT = self._con.get(xnet['connectionTemplateUri'])
defaultCT['bandwidth']['maximumBandwidth'] = bw['maximumBandwidth']
defaultCT['bandwidth']['typicalBandwidth'] = bw['typicalBandwidth']
task, body = self._con.put(defaultCT['uri'], defaultCT)
return self._activity.make_task_entity_tuple(task)
###########################################################################
# NetworkSets
###########################################################################
def create_networkset(self, name, nets=[], bw={},
blocking=True, verbose=False):
nset = make_netset_dict(name, nets)
body = self._con.conditional_post(uri['nset'], nset)
task, entity = self._activity.make_task_entity_tuple(body)
if not task and not entity:
            # conditional_post returned an already existing resource
            return body
        else:
            # assume we can update CT even if the network-create task is not complete
self.update_net_ctvalues(entity, bw)
if blocking is True:
task = self._activity.wait4task(task, tout=60, verbose=verbose)
return entity
def delete_networkset(self, networkset, blocking=True, verbose=False):
task, body = self._con.delete(networkset['uri'])
if blocking is True:
task = self._activity.wait4task(task, verbose=verbose)
return task
def get_networksets(self):
return get_members(self._con.get(uri['nset']))
###########################################################################
# Networks
###########################################################################
def create_enet_networks(self, prefix, vid_start, vid_count, bw={}):
enet_list = []
try:
for vid in range(vid_start, vid_start + vid_count):
enet_name = '%s%s' % (prefix, vid)
enet_list.append(self.create_enet_network(enet_name,
vid,
bw=bw
))
except http.client.HTTPException:
# All or nothing
for enet in enet_list:
try:
self._con.delete(enet['uri'])
except http.client.HTTPException:
pass
raise HPOneViewException('Could not create one or more networks')
return enet_list
def create_enet_network(self, name, vid,
purpose='General',
smartLink=True,
privateNetwork=False,
ethernetNetworkType='Tagged',
bw={},
blocking=True,
verbose=False):
xnet = make_enet_dict(name, vid, smartLink=smartLink,
privateNetwork=privateNetwork, purpose=purpose,
ethernetNetworkType=ethernetNetworkType)
task, entity = self.create_network(uri['enet'], xnet, bw, verbose)
if blocking is True:
task = self._activity.wait4task(task, tout=60, verbose=verbose)
return entity
def create_fc_network(self, name, attach='FabricAttach',
autodist=True, linktime=30, bw={},
managedSanUri=None, blocking=True, verbose=False):
xnet = make_fc_dict(name, attach, autodist, linktime, managedSanUri)
task, entity = self.create_network(uri['fcnet'], xnet, bw, verbose)
if blocking is True:
task = self._activity.wait4task(task, tout=60, verbose=verbose)
return entity
def create_network(self, uri, xnet, bw={}, verbose=False):
# throws an exception if there is an error
body = self._con.conditional_post(uri, xnet)
task, entity = self._activity.make_task_entity_tuple(body)
if not task and not entity:
            # conditional_post returned an already existing resource
            return None, body
        else:
            # assume we can update CT even if the network-create task is not complete
self.update_net_ctvalues(entity, bw)
return task, entity
def update_network(self, xnet):
task, body = self._con.put(xnet['uri'], xnet)
return self._activity.make_task_entity_tuple(task)
def delete_network(self, xnet, blocking=True, verbose=False):
task, body = self._con.delete(xnet['uri'])
if blocking is True:
task = self._activity.wait4task(task, verbose=verbose)
return task
def get_enet_networks(self):
return get_members(self._con.get(uri['enet']))
def get_fc_networks(self):
return get_members(self._con.get(uri['fcnet']))
###########################################################################
# Uplink Sets
###########################################################################
def get_uplink_sets(self):
return get_members(self._con.get(uri['uplink-sets']))
def delete_uplink_set(self, uplink_set, blocking=True, verbose=False):
task, body = self._con.delete(uplink_set['uri'])
if blocking is True:
task = self._activity.wait4task(task, verbose=verbose)
return task
###########################################################################
# Interconnects
###########################################################################
def get_interconnects(self):
return get_members(self._con.get(uri['ic']))
def get_enet_network_by_name(self, nwname):
return self._con.get_entity_byfield(uri['enet'], 'name', nwname)
def get_fc_network_by_name(self, nwname):
return self._con.get_entity_byfield(uri['fcnet'], 'name', nwname)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| null |
hpOneView/networking.py
|
networking.py
|
py
| 10,168 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "future.standard_library.install_aliases",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "builtins.range",
"line_number": 155,
"usage_type": "call"
}
] |
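A hedged usage sketch for the bulk Ethernet-network helper in networking.py above; con stands for an already-authenticated hpOneView connection object, which is not constructed here:

```python
# Assumes `con` is an authenticated hpOneView connection (setup not shown).
net = networking(con)

# Create VLAN_100 .. VLAN_104; the helper rolls all of them back if any POST fails.
enets = net.create_enet_networks('VLAN_', vid_start=100, vid_count=5)
for enet in enets:
    print(enet['name'], enet['uri'])
```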
230333680
|
# -*- coding: utf-8 -*-
"""
################################################################################
Cell Counting in Target Nuclei Script
Author: Gerald M
Version U-net (Python 3)
This version uses a UNet to perform semantic segmentation of the images.
The oversampling correction has also been updated: for cells detected on several
slices, the middle slice's centroid is kept rather than the last detected position.
Instructions:
1) Run from command line with input parameters
################################################################################
"""
################################################################################
## Module import
################################################################################
import argparse
import csv
import cv2
import errno  # used in cellcount() for the errno.EAGAIN check; missing from the original
import json
import fcntl
import math
import os
import psutil
import requests  # used by slack_message(); missing from the original imports
import sys
import tifffile
import time
import warnings
import numpy as np
import nibabel as nib
import pandas as pd
from filters.gaussmedfilt import gaussmedfilt
from filters.adaptcircthresh import adaptcircthresh
from keras.models import model_from_json
from skimage.measure import regionprops, label
from PIL import Image
from skimage import io
from natsort import natsorted
from filters.rollingballfilt import rolling_ball_filter
from multiprocessing import Pool, Queue
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
Image.MAX_IMAGE_PIXELS = 1000000000
################################################################################
## Function definitions
################################################################################
def slack_message(text, channel, username):
"""
Slack integration to give slack message to chosen channel. Fill in the slack
hook url below to send to own slack channel.
Params
------
text : str
String of text to post.
channel : str
String for the channel to post to.
username : str
String for the user posting the message.
"""
# from urllib3 import request
import json
post = {"text": "{0}".format(text),
"channel": "{0}".format(channel),
"username": "{0}".format(username),
"icon_url": "https://github.com/gm515/gm515.github.io/blob/master/Images/imperialstplogo.png?raw=true"}
try:
json_data = json.dumps(post)
req = requests.post('https://hooks.slack.com/services/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',
data=json_data.encode('ascii'),
headers={'Content-Type': 'application/json'})
except Exception as em:
print("EXCEPTION: " + str(em))
def distance(a, b):
"""
Calculate distance between coordinates a and b.
Params
------
a : tuple
b : tuple
Returns
-------
out : float
Squared distance between coordinates a and b.
"""
return (a[0] - b[0])**2 + (a[1] - b[1])**2
def oversamplecorr(centroids, radius):
"""
Correction for oversampling given list of centroids.
Params
------
centroids : dictionary
Dictionary of centroids where key is slice position and items are lists
of coordinate positions of detected cells.
radius : int
Radius with which to claim cells are overlapping.
Returns
-------
out : dictionary
Output of dictionary of oversampled corrected cell positions.
"""
keepcentroids = {}
overlapcentroids = {}
i = 0
# First check if there are more than two layers
if len(list(centroids.keys())) > 1:
# Loop through successive layers and identify overlapping cells
for layer1, layer2 in zip(list(centroids.keys())[:-1], list(centroids.keys())[1:]):
# First check if layers are successive otherwise you cannot correct
if layer2-layer1 == 1:
# Store cell centroids for each layer
layer1centroids = centroids[layer1]
layer2centroids = centroids[layer2]
# Loop through each cell in layer 1 and check if overlapping
for cell in layer1centroids:
                    # Get a boolean list with True in the position of any layer-2 cell that overlaps the layer-1 cell at minimum distance
distances = np.array([distance(cell, cell2) for cell2 in layer2centroids])
mindistance = distances == np.min(distances)
withindistance = np.array([distance(cell, cell2)<=radius**2 for cell2 in layer2centroids])
overlapping = mindistance&withindistance
# First check if cell is already within the overlap dictionary, overlapcentroids
overlapkey = [key for key, value in overlapcentroids.items() if cell in value]
# If there is a True in the overlapping list, then there is a minimum distance oversampled cell detected
if True in overlapping:
# If so, only add the paired cell
if overlapkey:
overlapcentroids.setdefault(overlapkey[0],[]).append(layer2centroids[np.argmax(overlapping)])
# Else, add both the new cell and pair to it's own unique dictionary key
else:
overlapcentroids.setdefault(i,[]).append(cell)
overlapcentroids.setdefault(i,[]).append(layer2centroids[np.argmax(overlapping)])
# Update counter to keep track of number of overlapped cells in total
# Uses this as key
i += 1
# Only if all overlapping is False and the cell is not detected in overlapcentroids already, then add cell to keep
if (not True in overlapping) and (not overlapkey):
# If no overlap is detected, then stick cell into keep dictionary
keepcentroids.setdefault(cell[2], []).append(cell)
else:
layer1centroids = centroids[layer1]
for cell in layer1centroids:
keepcentroids.setdefault(cell[2], []).append(cell)
# Account for the last layer
layer2centroids = centroids[layer2]
for cell in layer2centroids:
overlapkey = [key for key, value in overlapcentroids.items() if cell in value]
            if overlapkey:
                continue  # already paired with a cell on a previous slice; keep scanning the rest
            else:
                keepcentroids.setdefault(cell[2], []).append(cell)
# Go through each overlapping cell and take the middle cell
# Stick middle cell into the keep dictionary at the relevant slice position
for key, overlapcells in overlapcentroids.items():
midcell = overlapcells[int(len(overlapcells)/2)]
keepcentroids.setdefault(midcell[2], []).append(midcell)
else:
keepcentroids = centroids
return keepcentroids
def get_children(json_obj, acr, ids):
for obj in json_obj:
if obj['children'] == []:
acr.append(obj['acronym'])
ids.append(obj['id'])
else:
acr.append(obj['acronym'])
ids.append(obj['id'])
get_children(obj['children'], acr, ids)
return (acr, ids)
def get_structure(json_obj, acronym):
    found = (False, None)
    for obj in json_obj:
        if obj['acronym'].lower() == acronym:
            [acr, ids] = get_children(obj['children'], [], [])
            if ids == []:
                acr = [obj['acronym']]
                ids = [obj['id']]
                return (True, acr, ids)
            else:
                acr.append(obj['acronym'])
                ids.append(obj['id'])
                return (True, acr, ids)
        else:
            found = get_structure(obj['children'], acronym)
            # test the flag, not the tuple: a bare 'if found:' is always truthy
            if found[0]:
                return found
    return found
def progressBar(sliceno, value, endvalue, statustext, bar_length=50):
percent = float(value) / endvalue
arrow = '-' * int(round(percent * bar_length)) + '/'
spaces = ' ' * (bar_length - len(arrow))
sys.stdout.write("\nSlice {0} [{1}] {2}% {3}".format(sliceno, arrow + spaces, int(round(percent * 100)), statustext))
class Network():
def __init__(self):
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "" #0 for GPU
from keras.models import load_model
import keras.backend as K
import tensorflow as tf
K.set_learning_phase(0)
K.set_session(tf.Session())
model_path = '../../neuroseg/models/2019_09_30_UNet/focal_unet_model.json'
weights_path = '../../neuroseg/models/2019_09_30_UNet/focal_unet_weights.best.hdf5'
# Load the classifier model, initialise and compile
with open(model_path, 'r') as f:
self.model = model_from_json(f.read())
self.model.load_weights(weights_path)
def predict(self, img, **kwargs):
return self.model.predict(img)
def cellcount(imagequeue, radius, size, circ_thresh, use_medfilt):
while True:
item = imagequeue.get()
if item is None:
break
else:
slice_number, image, hemseg_image, row_idx, col_idx, count_path, name = item
centroids = []
if image.shape[0]*image.shape[1] > (radius*2)**2 and np.max(image) != 0.:
model = Network()
images_array = []
image = image.astype(np.float32)
image = (image-np.min(image))/(np.max(image)-np.min(image))
# Image.fromarray(np.uint8(image*255)).save('/Users/gm515/Desktop/img/'+str(slice_number)+'.tif')
shape = image.shape
newshape = tuple((int( 16 * math.ceil( i / 16. )) for i in shape))
image = np.pad(image, ((0,np.subtract(newshape,shape)[0]),(0,np.subtract(newshape,shape)[1])), 'constant')
images_array.append(image)
images_array = np.array(images_array)
images_array = images_array[..., np.newaxis]
image = model.predict(images_array)
image = np.squeeze(image[0])
image = image[0:shape[0],0:shape[1]]
# Image.fromarray(np.uint8((image>0.25)*255)).save('/Users/gm515/Desktop/pred/'+str(slice_number)+'.tif')
# Remove objects smaller than chosen size
image = label(image>0.5, connectivity=image.ndim)
# Get centroids list as (row, col) or (y, x)
centroids = [region.centroid for region in regionprops(image) if ((region.area>size) and (region.area<10*size) and (((4 * math.pi * region.area) / (region.perimeter * region.perimeter))>0.7))]
# Add 1 to slice number to convert slice in index to slice file number
if row_idx is not None:
# Convert coordinate of centroid to coordinate of whole image if mask was used
if hemseg_image is not None:
coordfunc = lambda celly, cellx : (col_idx[cellx], row_idx[celly], slice_number, int(hemseg_image[celly,cellx]))
else:
coordfunc = lambda celly, cellx : (col_idx[cellx], row_idx[celly], slice_number)
else:
coordfunc = lambda celly, cellx : (cellx, celly, slice_number)
# Centroids are currently (row, col) or (y, x)
# Flip order so (x, y) using coordfunc
centroids = [coordfunc(int(c[0]), int(c[1])) for c in centroids]
# Write out results to file
csv_file = count_path+'/counts_unet/'+str(name)+'_unet_count_INQUEUE.csv'
while True:
try:
with open(csv_file, 'a+') as f:
# Lock file during writing
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
csv.writer(f, delimiter=',').writerows(centroids)
                        # Unlock file and close out
fcntl.flock(f, fcntl.LOCK_UN)
break
except IOError as e:
# raise on unrelated IOErrors
if e.errno != errno.EAGAIN:
raise
else:
time.sleep(0.1)
print('Finished - Queue position: '+str(slice_number)+' Structure: '+str(name))
if __name__ == '__main__':
################################################################################
## User defined parameters via command line arguments
################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('imagepath', default=[], type=str, help='Image directory path for counting')
parser.add_argument('-maskpath', default=[], type=str, dest='maskpath', help='Annotation file path for masking')
parser.add_argument('-hempath', default=[], type=str, dest='hempath', help='Hemisphere file path for hemisphere classification')
parser.add_argument('-structures', default=[], type=str, dest='structures', help='List of structures to count within')
parser.add_argument('-oversample', action='store_false', default=True, dest='oversample', help='Oversample correction')
parser.add_argument('-start', default=None, type=int, dest='start', help='Start image number if required')
parser.add_argument('-end', default=None, type=int, dest='end', help='End image number if required')
parser.add_argument('-medfilt', default=False, action='store_true', dest='medfilt', help='Use custom median donut filter')
parser.add_argument('-circthresh', default=0.7, type=float, dest='circthresh', help='Circularity threshold value')
parser.add_argument('-xyvox', default=0.54, type=float, dest='xyvox', help='XY voxel size')
parser.add_argument('-zvox', default=10., type=float, dest='zvox', help='Z voxel size')
parser.add_argument('-ncpu', default=6, type=int, dest='ncpu', help='Number of CPUs to use')
    parser.add_argument('-size', default=100., type=float, dest='size', help='Approximate area (in pixels) of detected objects')
parser.add_argument('-radius', default=6, type=float, dest='radius', help='Approximate radius of detected objects')
args = parser.parse_args()
image_path = args.imagepath
mask_path = args.maskpath
hem_path = args.hempath
structure_list = args.structures
over_sample = args.oversample
number_files = [None, None]
number_files[0] = args.start
number_files[1] = args.end
use_medfilt = args.medfilt
circ_thresh = args.circthresh
xyvox = args.xyvox
zvox = args.zvox
ncpu = args.ncpu
size = args.size
radius = args.radius
if mask_path:
mask = True
else:
mask = False
if hem_path:
hem = True
else:
hem = False
print ('User defined parameters')
print( "Image path: {} \nAnnotation path: {} \nHemisphere path: {} \nStructure list: {} \nOversample: {} \nStart: {} \nEnd: {} \nCustom median donut filter: {} \nCircularity threshold: {} \nXYvox: {} \nZvox: {} \nncpu: {} \nSize: {} \nRadius: {}".format(
image_path,
mask_path,
hem_path,
structure_list,
over_sample,
number_files[0],
number_files[1],
use_medfilt,
circ_thresh,
xyvox,
zvox,
ncpu,
size,
radius
))
print ('')
################################################################################
## Initialisation
################################################################################
# Create directory to hold the counts in parent folder of images
count_path = '/'+os.path.join(*image_path.split(os.sep)[:-1])
if not os.path.exists(count_path+'/counts_unet'):
os.makedirs(count_path+'/counts_unet')
# List of files to count
count_files = []
count_files += [each for each in os.listdir(image_path) if each.endswith('.tif')]
count_files = natsorted(count_files)
if number_files[0] != None:
count_files = count_files[number_files[0]-1:number_files[1]]
print ('Counting in files: '+count_files[0]+' to '+count_files[-1])
################################################################################
## Retrieving structures IDs
################################################################################
if mask:
file, extension = os.path.splitext(mask_path)
if extension == '.nii':
seg = nib.load(mask_path).get_data()
else:
seg = io.imread(mask_path)
print ('Loaded segmentation atlas')
if hem:
file, extension = os.path.splitext(hem_path)
if extension == '.nii':
hemseg = nib.load(hem_path).get_data()
else:
hemseg = io.imread(hem_path)
print ('Loaded hemisphere atlas')
ids = []
acr = []
index = np.array([[],[],[]])
if mask:
anno_file = json.load(open('2017_annotation_structure_info.json'))
structure_list = [x.strip() for x in structure_list.lower().split(",")]
for elem in structure_list:
a, i = get_structure(anno_file['children'], elem)[1:]
for name, structure in zip(a, i):
if structure in seg:
index = np.concatenate((index, np.array(np.nonzero(structure == seg))), axis=1)
ids.append(structure)
acr.append(name)
else:
print (name+' not found -> Removed')
else:
ids.extend(['None'])
acr.extend(['None'])
################################################################################
## Counting
################################################################################
print ('')
tstart = time.time()
structure_index = 0
################################################################################
## Loop through each slice and count in chosen structure
################################################################################
proceed = True
if mask:
index = np.array([[],[],[]])
index = np.concatenate((index, np.where(np.isin(seg,ids))), axis=1)
if index.size > 0:
zmin = int(index[0].min())
zmax = int(index[0].max())
else:
proceed = False
else:
zmin = 0
zmax = len(count_files)
if proceed:
# Create a Queue and push images to queue
print ('Setting up Queue')
imagequeue = Queue()
# Start processing images
print ('Creating threads to process Queue items')
imageprocess = Pool(ncpu, cellcount, (imagequeue, radius, size, circ_thresh, use_medfilt))
print ('')
for slice_number in range(zmin,zmax):
# Load image and convert to dtype=float and scale to full 255 range
# image = Image.open(image_path+'/'+count_files[slice_number], 'r')
# temp_size = image.size
# image = np.frombuffer(image.tobytes(), dtype=np.uint8, count=-1).reshape(image.size[::-1])
image = tifffile.imread(image_path+'/'+count_files[slice_number], key=0).astype(np.float32)
temp_size = image.shape[::-1]
image_max = np.max(image)
if mask:
# Get annotation image for slice
mask_image = np.array(Image.fromarray(seg[slice_number]).resize(tuple([int(x) for x in temp_size]), Image.NEAREST))
# Initiate empty lists
row_idx = []
col_idx = []
################################################################################
## Loop through slices based on cropped boundaries and store into one array
################################################################################
row_idx_array = None
col_idx_array = None
# pxvolume = 0
# Loop through structures available in each slice
for name, structure in zip(acr,ids):
            # If masking is not required, submit to queue with redundant variables
if not mask:
imagequeue.put((slice_number, image, [None], [None], [None], count_path, name))
print (image_path.split(os.sep)[3]+' Added slice: '+str(slice_number)+' Queue position: '+str(slice_number-zmin)+' Structure: '+str(name)+' [Memory info] Usage: '+str(psutil.virtual_memory().percent)+'% - '+str(int(psutil.virtual_memory().used*1e-6))+' MB\n')
else:
start = time.time()
if structure in mask_image:
# Resize mask
start = time.time()
mask_image_per_structure = np.copy(mask_image)
mask_image_per_structure[mask_image_per_structure!=structure] = 0
# Use mask to get global coordinates
idx = np.ix_(mask_image_per_structure.any(1),mask_image_per_structure.any(0))
row_idx = idx[0].flatten()
col_idx = idx[1].flatten()
# Apply crop to image and mask then apply mask
image_per_structure = np.copy(image)[idx]
mask_image_per_structure = mask_image_per_structure[idx]
start = time.time()
image_per_structure = image_per_structure.astype(float)
# image_per_structure = np.multiply(np.divide(image_per_structure,np.max(image_per_structure)), 255.)
image_per_structure *= 255./image_max
mask_image_per_structure = cv2.medianBlur(np.array(mask_image_per_structure).astype(np.uint8), 121) # Apply median filter to massively reduce box like boundary to upsized mask
image_per_structure[mask_image_per_structure==0] = 0
# Keep track of pixel volume
# pxvolume += mask_image_per_structure.any(axis=-1).sum()
mask_image_per_structure = None
if hem:
hemseg_image_per_structure = np.array(Image.fromarray(hemseg[slice_number]).resize(tuple([int(x) for x in temp_size]), Image.NEAREST))
hemseg_image_per_structure = hemseg_image_per_structure[idx]
# Add queue number, image, row and col idx to queue
imagequeue.put((slice_number, image_per_structure, hemseg_image_per_structure, row_idx, col_idx, count_path, name))
image_per_structure = None
hemseg_image_per_structure = None
statustext = image_path.split(os.sep)[3]+' Added slice: '+str(slice_number)+' Queue position: '+str(slice_number-zmin)+' Structure: '+str(name)+' [Memory info] Usage: '+str(psutil.virtual_memory().percent)+'% - '+str(int(psutil.virtual_memory().used*1e-6))+' MB\n'
progressBar(slice_number, slice_number-zmin, zmax-zmin, statustext)
for close in range(ncpu):
imagequeue.put(None)
imageprocess.close()
imageprocess.join()
print ('')
print ('Finished queue processing')
print ('')
print ('Performing oversampling correction...')
# Oversampling correction and table write-out
df = pd.DataFrame(columns = ['ROI', 'L', 'R'])
for name in acr:
with open(count_path+'/counts_unet/'+str(name)+'_unet_count_INQUEUE.csv') as csvDataFile:
csvReader = csv.reader(csvDataFile)
centroids = {}
for row in csvReader:
centroids.setdefault(int(row[2]), []).append([int(entry) for entry in row])
print (str(sum(map(len, centroids.values())))+' Original uncorrected count')
keepcentroids = oversamplecorr(centroids,radius)
print (str(sum(map(len, keepcentroids.values())))+' Final corrected count')
with open(count_path+'/counts_unet/'+str(name)+'_unet_count.csv', 'w+') as f:
for key in sorted(keepcentroids.keys()):
if len(keepcentroids[key]) > 0:
csv.writer(f, delimiter=',').writerows([val for val in keepcentroids[key]])
os.remove(count_path+'/counts_unet/'+str(name)+'_unet_count_INQUEUE.csv')
print (name+' oversampling done')
keepcentroids = pd.read_csv(count_path+'/counts_unet/'+str(name)+'_unet_count.csv', names=['X', 'Y', 'Z', 'Hemisphere'])
leftcells = len(keepcentroids.loc[keepcentroids['Hemisphere']==0])
rightcells = len(keepcentroids.loc[keepcentroids['Hemisphere']==1])
df = df.append({'ROI':name, 'L':leftcells, 'R':rightcells}, ignore_index=True)
# Write dataframe to csv
df.to_csv(count_path+'/counts_unet/_counts_table.csv', index=False)
print ('')
print ('~Fin~')
print (count_path)
minutes, seconds = divmod(time.time()-tstart, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
text = 'Counting completed in %02d:%02d:%02d:%02d' %(days, hours, minutes, seconds)
print (text)
slack_message(text, '#cctn', 'CCTN')
| null |
cell_counting/cctn/cctn_unet.py
|
cctn_unet.py
|
py
| 25,778 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "warnings.simplefilter",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PIL.Image.DecompressionBombWarning",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "PIL.Image.MAX_IMAGE_PIXELS",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "keras.backend.set_learning_phase",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "keras.backend.set_session",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "tensorflow.Session",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "keras.models.model_from_json",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "{'os': 'os', 'load_model': 'keras.models.load_model', 'K': 'keras.backend', 'tf': 'tensorflow'}",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.subtract",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "numpy.squeeze",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "skimage.measure.label",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "skimage.measure.regionprops",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "fcntl.flock",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "fcntl.LOCK_EX",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "fcntl.LOCK_NB",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "fcntl.flock",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "fcntl.LOCK_UN",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 401,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "natsort.natsorted",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "nibabel.load",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "skimage.io.imread",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "nibabel.load",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "skimage.io.imread",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 429,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Queue",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "tifffile.imread",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 494,
"usage_type": "attribute"
},
{
"api_name": "numpy.max",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "PIL.Image.NEAREST",
"line_number": 500,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 518,
"usage_type": "attribute"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "cv2.medianBlur",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 543,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 553,
"usage_type": "name"
},
{
"api_name": "PIL.Image.NEAREST",
"line_number": 553,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 562,
"usage_type": "attribute"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 612,
"usage_type": "call"
}
] |
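A tiny worked example of oversamplecorr from the script above (assuming the function and its numpy import are in scope). Two centroids at nearly the same (x, y) on successive slices are treated as one oversampled cell and collapsed to the middle detection; a distant cell on the last slice is kept as-is:

```python
centroids = {
    10: [(50, 60, 10)],   # (x, y, slice)
    11: [(51, 60, 11)],   # within radius of the slice-10 cell -> same physical cell
    12: [(300, 10, 12)],  # far away -> an independent cell
}
print(oversamplecorr(centroids, radius=6))
# -> {12: [(300, 10, 12)], 11: [(51, 60, 11)]}: the pair collapses to its middle detection
```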
268321940
|
from django.shortcuts import render, get_object_or_404, HttpResponse
from courses import models
from django.views.generic.base import View
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
from operation.models import UserFavorite, CourseComments, UserCourse
from django.db.models import Q
from utils.mixin_utils import LoginRequiredMixin
# Create your views here.
class CourseListView(View):
"""
    Course list page.
"""
def get(self, request):
all_course = models.Course.objects.all()
hot_courses = models.Course.objects.all().order_by("-students")[:3]
search_keywords = request.GET.get('keywords', '')
if search_keywords:
            # Q objects let us OR several fields together
all_course = all_course.filter(
Q(name__icontains=search_keywords) |
Q(desc__icontains=search_keywords) |
Q(detail__icontains=search_keywords)
)
        # Paginate the courses
        # Try to read the page parameter from the GET request
        # Fall back to the first page if the parameter is invalid
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
        # Sort ordering
sort = request.GET.get('sort', "")
if sort:
if sort == "students":
all_course = all_course.order_by("-students")
elif sort == "hot":
all_course = all_course.order_by("-click_nums")
p = Paginator(all_course, 6, request=request)
courses = p.page(page)
return render(request, 'course-list.html', {
'all_course': courses,
'sort': sort,
"hot_courses": hot_courses,
})
class CourseDetailView(View):
"""
    Course detail page.
"""
def get(self, request, course_id):
course_obj = get_object_or_404(models.Course, pk=course_id)
course_obj.click_nums += 1
course_obj.save()
tag = course_obj.tag
if tag:
            # Related course recommendations
relate_courses = models.Course.objects.filter(tag=tag).exclude(pk__in=[course_obj.pk])[:2]
else:
relate_courses = []
        # Track whether the course and its organization are already favorited
        has_fav_course = False
        has_fav_org = False
        # Only check favorites when the user is logged in.
if request.user.is_authenticated:
if UserFavorite.objects.filter(user=request.user, fav_id=course_obj.id, fav_type=1):
has_fav_course = True
if UserFavorite.objects.filter(user=request.user, fav_id=course_obj.course_org.id, fav_type=2):
has_fav_org = True
return render(request, 'course-detail.html', {
'course': course_obj,
'relate_courses': relate_courses,
'has_fav_course': has_fav_course,
'has_fav_org': has_fav_org
})
class CourseInfoView(LoginRequiredMixin, View):
"""
    Course info view.
"""
def get(self, request, course_id):
course = get_object_or_404(models.Course,id=course_id)
course.students += 1
course.save()
user_courses = UserCourse.objects.filter(user=request.user, course=course)
if not user_courses:
            # Link the user to the course if they have not started it yet
user_course = UserCourse(user=request.user, course=course)
user_course.save()
        # These lookups are now handled inside the include tag
# all_resources = models.CourseResource.objects.filter(course=course)
return render(request, 'course-video.html', {
'course': course,
            # 'all_resources': all_resources  # now handled inside the include tag
})
class CommentsView(LoginRequiredMixin, View):
"""
    Comment list view.
"""
def get(self, request, course_id):
course = models.Course.objects.get(id=int(course_id))
        # all_resources = models.CourseResource.objects.filter(course=course)  # now handled inside the include tag
all_comments = CourseComments.objects.all()
return render(request, "course-comment.html", {
"course": course,
# "all_resources": all_resources, # 把这些都写在include tag里面了
'all_comments': all_comments,
})
class AddCommentsView(LoginRequiredMixin, View):
"""
    Add-comment endpoint view.
"""
def post(self, request):
if not request.user.is_authenticated:
            # Return JSON noting the user is not logged in; redirecting to the login page is handled in the AJAX code
return HttpResponse('{"status":"fail", "msg":"用户未登录"}', content_type='application/json')
course_id = request.POST.get("course_id", 0)
comments = request.POST.get("comments", "")
if int(course_id) > 0 and comments:
            # Instantiate a CourseComments object
            course_comments = CourseComments()
            # Fetch the course being commented on
            course = models.Course.objects.get(id=int(course_id))
            # Save the commented course, the comment text, and the commenting user to the database
course_comments.course = course
course_comments.comments = comments
course_comments.user = request.user
course_comments.save()
return HttpResponse('{"status":"success", "msg":"评论成功"}', content_type='application/json')
else:
return HttpResponse('{"status":"fail", "msg":"评论失败"}', content_type='application/json')
class VideoPlayView(LoginRequiredMixin, View):
"""
    Video playback page.
"""
def get(self, request, video_id):
video = get_object_or_404(models.Video, pk=video_id)
        # Follow foreign keys from the video through its lesson to the owning course
course = video.lesson.course
course.students += 1
course.save()
return render(request, 'course-play.html', {
'course': course
})
| null |
apps/courses/views.py
|
views.py
|
py
| 6,014 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.views.generic.base.View",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "courses.models.Course.objects.all",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "courses.models.Course.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pure_pagination.PageNotAnInteger",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pure_pagination.Paginator",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "courses.models.Course.objects.filter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "operation.models.UserFavorite.objects.filter",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "operation.models.UserFavorite.objects",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "operation.models.UserFavorite",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "operation.models.UserFavorite.objects.filter",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "operation.models.UserFavorite.objects",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "operation.models.UserFavorite",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "utils.mixin_utils.LoginRequiredMixin",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "operation.models.UserCourse.objects.filter",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "operation.models.UserCourse.objects",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "operation.models.UserCourse",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "operation.models.UserCourse",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "utils.mixin_utils.LoginRequiredMixin",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "courses.models.Course.objects.get",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "operation.models.CourseComments.objects.all",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "operation.models.CourseComments.objects",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "operation.models.CourseComments",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "utils.mixin_utils.LoginRequiredMixin",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.HttpResponse",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "operation.models.CourseComments",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "courses.models.Course.objects.get",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "courses.models.Course",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.HttpResponse",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.HttpResponse",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "utils.mixin_utils.LoginRequiredMixin",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "django.views.generic.base.View",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "courses.models.Video",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "courses.models",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 157,
"usage_type": "call"
}
] |
366480015
|
from flask import render_template, jsonify, request, flash
from app import db
from flask_login import login_required, current_user
from app.main import bp
import json
import requests
import time
@bp.route('/')
@bp.route('/index')
@login_required
def index():
user = current_user
if user.achievement1 is None:
user.achievement1 = 0
user.achievement2 = 0
user.achievement3 = 0
user.achievement4 = 0
user.achievement5 = 0
user.achievement6 = 0
user.achievement7 = 0
user.achievement8 = 0
user.achievement9 = 0
user.achievement10 = 0
db.session.add(user)
db.session.commit()
return render_template('index.html', title='Home', user=current_user)
@bp.route('/achievements')
@login_required
def achievements():
return render_template('achievements.html', title='Achievements', user=current_user)
@bp.route('/update_achievements', methods=['POST'])
@login_required
def update_achievements():
user = current_user
if user.points is None:
pass
elif (user.points // 1000 + 1) == 2:
user.achievement1 = 1
elif (user.points // 1000 + 1) == 5:
user.achievement2 = 1
elif (user.points // 1000 + 1) == 10:
user.achievement3 = 1
elif (user.points // 1000 + 1) == 15:
user.achievement4 = 1
elif (user.points // 1000 + 1) == 20:
user.achievement5 = 1
elif (user.points // 1000 + 1) == 25:
user.achievement6 = 1
elif (user.points // 1000 + 1) == 30:
user.achievement7 = 1
elif (user.points // 1000 + 1) == 40:
user.achievement8 = 1
elif (user.points // 1000 + 1) == 50:
user.achievement9 = 1
elif (user.points // 1000 + 1) == 100:
user.achievement10 = 1
else:
pass
db.session.commit()
return render_template('achievements.html', title='Achievements', user=current_user)
@bp.route('/profile')
@login_required
def profile():
return render_template('profile.html', title='Profile', user=current_user)
@bp.route('/update_lastlogin', methods=['POST'])
@login_required
def update_lastlogin():
user = current_user
currtime = time.time()
lastlogin = user.lastlogin
multiplier = user.multiplier
if user.lastlogin is None: # first time login setup
user.lastlogin = currtime
user.currstreak = 1
user.multiplier = 1.00
db.session.add(user)
db.session.commit()
return render_template('index.html', title='Home', user=current_user)
if currtime - lastlogin > 86400 and currtime - lastlogin < 172800:
        # it's been more than 24 hours and less than 48 hours since last login, re-up (or reset if Sunday)
localtime = time.localtime()
if localtime.tm_wday == 6: # reset on Sundays. tm_wday has value in [0,6] where 0 is monday and 6 is sunday
user.currstreak = 1
user.multiplier = 1.00
user.lastlogin = currtime
db.session.add(user)
db.session.commit()
return render_template('index.html', title='Home', user=current_user)
user.currstreak = user.currstreak + 1 # up streak by 1
user.lastlogin = currtime # set new lastlogin, user needs to sign in again between 24 to 48 hours from this point to maintain streak
if user.currstreak == 2:
user.multiplier = 1.10
if user.currstreak == 3:
user.multiplier = 1.50
if user.currstreak == 4:
user.multiplier = 2.00
if user.currstreak == 5:
user.multiplier = 2.50
if user.currstreak == 6:
            user.multiplier = 2.75
if user.currstreak == 7:
user.multiplier = 3.00
db.session.add(user)
db.session.commit()
return render_template('index.html', title='Home', user=current_user)
    elif currtime - lastlogin > 172800:  # user didn't maintain streak, reset
user.currstreak = 1
user.multiplier = 1.00
user.lastlogin = currtime # set new lastlogin, user needs to sign in again between 24 to 48 hours from this point to maintain streak
db.session.add(user)
db.session.commit()
return render_template('index.html', title='Home', user=current_user)
return render_template('index.html', title='Home', user=current_user)
@bp.route('/contact')
@login_required
def contact():
return render_template('contact.html', title='Contact Us', user=current_user, points=current_user.points)
@bp.route('/weeklyQuiz')
@login_required
def weeklyquiz():
return render_template('weeklyquiz.html', title='Weekly Quiz',user=current_user, points = current_user.points)
@bp.route('/weeklyQuizDone')
@login_required
def weeklyquizDone():
return render_template('weeklyquizDone.html', title='Weekly Quiz',user=current_user, points = current_user.points)
@bp.route('/update_quiz_status', methods=['POST'])
@login_required
def update_quiz_status():
user = current_user
user.quiz_taken=1
db.session.add(user)
db.session.commit()
return render_template('weeklyquiz.html', title='weeklyQuiz', user=current_user, points = current_user.points)
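# --- Hedged sketch, not part of the original file ---
# The streak ladder in update_lastlogin hard-codes one `if` per streak
# length. A minimal table-driven alternative, assuming the same thresholds;
# illustration only, not the app's actual implementation:
STREAK_MULTIPLIERS = {2: 1.10, 3: 1.50, 4: 2.00, 5: 2.50, 6: 2.75, 7: 3.00}

def multiplier_for_streak(currstreak):
    # any streak length not in the table maps to the base multiplier
    return STREAK_MULTIPLIERS.get(currstreak, 1.00)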
| null |
app/main/routes.py
|
routes.py
|
py
| 5,200 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask_login.current_user",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "app.db.session.add",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "app.main.bp.route",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.main.bp.route",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "app.main.bp.route",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "app.main.bp.route",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "app.main.bp.route",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "app.db.session.add",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "time.localtime",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "app.db.session.add",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "app.db.session.add",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "app.db.session.add",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "app.main.bp.route",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.points",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "app.main.bp.route",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.points",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "app.main.bp.route",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.points",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "app.main.bp.route",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "app.db.session.add",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "app.db.session.commit",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.points",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "app.main.bp.route",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "app.main.bp",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "flask_login.login_required",
"line_number": 151,
"usage_type": "name"
}
] |
236750751
|
from django.db.models import Q
from rest_framework import generics, mixins
from omsApp.models import Product, ProductType
from .serializers import ProductSerializer, ProductTypeSerializer
from .permissions import IsOwnerOrReadOnly
class ProductTypeRudView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'id'
serializer_class = ProductTypeSerializer
permission_classes = [IsOwnerOrReadOnly]
def get_queryset(self):
return ProductType.objects.all()
class ProductTypeAPIView(mixins.CreateModelMixin, generics.ListAPIView):
lookup_field = 'id'
serializer_class = ProductTypeSerializer
queryset = ProductType.objects.all()
def get_queryset(self):
qs = ProductType.objects.all()
query = self.request.GET.get("q")
if query is not None:
qs = qs.filter(Q(type__icontains=query) | Q(description__icontains=query)).distinct()
return qs
class ProductsAPIView(mixins.CreateModelMixin, generics.ListAPIView):
    lookup_field = 'id'  # url(r'(?P<pk>\d+)')
serializer_class = ProductSerializer
queryset = Product.objects.all()
def get_queryset(self):
qs = Product.objects.all()
query = self.request.GET.get("q")
if query is not None:
qs = qs.filter(Q(name__icontains=query) | Q(image__icontains=query)).distinct()
return qs
# needs user authentication to create which is a read only in serializers
def perform_create(self, serializer):
serializer.save(user=self.request.user)
# built in method to handle a create call
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ProductsRudView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'id'  # url(r'(?P<pk>\d+)')
serializer_class = ProductSerializer
permission_classes = [IsOwnerOrReadOnly]
# queryset = Product.objects.all()
def get_queryset(self):
return Product.objects.all()
# def get_object(self):
# return Product.objects.all()
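# --- Hedged usage sketch, not part of the original file ---
# The `q` parameter feeds the Q(...) filters in get_queryset above. Assuming
# the list view is routed at /api/products/ (the URL conf is not shown), a
# client-side search could look like this:
import requests

def search_products(query, base_url="http://localhost:8000/api/products/"):
    # DRF list views return JSON; raise on HTTP errors for clarity
    response = requests.get(base_url, params={"q": query})
    response.raise_for_status()
    return response.json()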
| null |
omsApp/api/views.py
|
views.py
|
py
| 2,049 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "serializers.ProductTypeSerializer",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "permissions.IsOwnerOrReadOnly",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "omsApp.models.ProductType.objects.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "omsApp.models.ProductType.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "omsApp.models.ProductType",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rest_framework.mixins.CreateModelMixin",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.mixins",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "serializers.ProductTypeSerializer",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "omsApp.models.ProductType.objects.all",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "omsApp.models.ProductType.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "omsApp.models.ProductType",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "omsApp.models.ProductType.objects.all",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "omsApp.models.ProductType.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "omsApp.models.ProductType",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "rest_framework.mixins.CreateModelMixin",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.mixins",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "serializers.ProductSerializer",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "omsApp.models.Product.objects.all",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "omsApp.models.Product.objects",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "omsApp.models.Product",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "omsApp.models.Product.objects.all",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "omsApp.models.Product.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "omsApp.models.Product",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "serializers.ProductSerializer",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "permissions.IsOwnerOrReadOnly",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "omsApp.models.Product.objects.all",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "omsApp.models.Product.objects",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "omsApp.models.Product",
"line_number": 60,
"usage_type": "name"
}
] |
296619409
|
import os, logging, fileinput, subprocess, re
from datetime import datetime, timedelta
from .Switch import switch
from . import is_a_cme, is_a_docker, docker_run
def set_clock(newtime):
''' use the system 'date' command to set it
format of newtime string: "%Y-%m-%dT%H:%M:%S.SSSSSS"
TODO: parse/validate the format
This function does nothing if not a cme device.
'''
if not is_a_cme():
return
cmd = ['date', '-s', newtime]
if is_a_docker():
docker_run(cmd)
else:
subprocess.run(cmd)
def check_ntp():
''' Requests ntpd status from the system.
Returns True if ntp is currently being used.
If not a cme device, this function always returns True.
'''
if not is_a_cme():
return True
cmd = ['systemctl', 'is-active', 'ntp']
if is_a_docker():
result = docker_run(cmd)
else:
result = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode().rstrip()
return result.lower() == 'active'
def manage_clock(user_settings):
''' Manage the NTP daemon and servers used in ntp.conf.
Function does nothing if not a cme device.
'''
clock_settings = user_settings['clock']
update_ntp = False
current_ntp = check_ntp()
current_servers = ntp_servers()
new_use_ntp = clock_settings['ntp']
new_ntp_servers = clock_settings['servers']
# NTP init
# Note that ntp should NOT be setup in init.d to start automatically:
# root@minibian:~# systemctl disable ntp
logger = logging.getLogger(__name__)
logger.info("NTP\t\t\tSetting\t(current)")
logger.info("\tUSE NTP:\t{0}\t({1})".format(new_use_ntp, current_ntp))
logger.info("\tSERVERS:\t{0}\t({1})".format(new_ntp_servers, current_servers))
if not is_a_cme():
logger.info("\tWARNING: Not a recognized CME platform - no actual changes will be made!")
return
if new_ntp_servers != current_servers:
update_ntp = True
ntp_servers(new_ntp_servers)
if update_ntp or (new_use_ntp != current_ntp):
ntp_enable = ['systemctl', 'enable', 'ntp']
ntp_start = ['systemctl', 'start', 'ntp']
ntp_disable = ['systemctl', 'disable', 'ntp']
ntp_stop = ['systemctl', 'stop', 'ntp']
if new_use_ntp:
logger.info("Starting NTP service.")
if is_a_docker():
docker_run(ntp_enable)
docker_run(ntp_start)
else:
subprocess.run(ntp_enable)
subprocess.run(ntp_start)
else:
logger.info("Stopping NTP service.")
if is_a_docker():
docker_run(ntp_stop)
docker_run(ntp_disable)
else:
subprocess.run(ntp_stop)
subprocess.run(ntp_disable)
def refresh_time(clock_settings):
''' Update the current clock settings with values from the system.
Does not update settings if not a cme device.
'''
# if useNTP, we'll update the NTP status
if clock_settings['ntp'] and is_a_cme():
cmd = ['ntpq', '-pn']
if is_a_docker():
result = docker_run(cmd)
else:
result = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode()
last_request, last_success = __parse_ntpq(result)
clock_settings['status'] = [ last_request, last_success ]
else:
clock_settings['status'] = [ '-', '-' ]
# read ntp servers from /etc/ntp.conf
clock_settings['servers'] = ntp_servers()
def ntp_servers(new_servers=None):
'''
Reads current NTP servers from /etc/ntp.conf.
If new_servers is not None, then ntp.conf will
be updated with the new servers. ntp service restart
will be required to pick up the new servers.
No changes are made if not a cme device.
'''
ntp_conf = "/etc/ntp.conf"
servers = new_servers or []
servers_added = False
writing = new_servers is not None and is_a_cme()
    # fileinput redirects stdout during in-place editing, so the prints
    # below go to the file, not the console.
with fileinput.input(files=(ntp_conf), inplace=writing) as f:
for line in f:
line = line.strip()
# read (and dup lines if writing) to "server" entry(ies)
if not line.startswith("server"):
if writing:
print(line)
continue
# insert new servers
if writing and not servers_added:
servers_added = True
for s in new_servers:
print("server {0} iburst".format(s))
print()
# append found servers if we're reading
if not writing:
# server line format (we want the address in the middle)
# server abc.def.123.100 iburst
servers.append(line.split()[1])
# EDGE CASE: We're updating the servers, but there were no current
# servers in the file (and thus no "server" lines), so we've reached
# the end of the file without adding our new_servers. If we do
# actually have some new_servers, we'll add them now at the end of the file.
if writing and not servers_added:
with open(ntp_conf, "a") as f:
f.write('\n# NTP servers\n')
for s in new_servers:
f.write("server {0} iburst\n".format(s))
f.write('\n')
return servers
def __parse_ntpq(ntpq_result):
''' Parse the ntpq output for NTP status
    good reference: http://www.linuxjournal.com/article/6812
'''
# remove header lines
start = ntpq_result.find('===\n')
    if start == -1:
return "-", "-"
servers = ntpq_result[start+4:]
# find NTP primary server (has * at beginning)
exp = ("\*((?P<remote>\S+)\s+)"
"((?P<refid>\S+)\s+)"
"((?P<st>\S+)\s+)"
"((?P<t>\S+)\s+)"
"((?P<when>\S+)\s+)"
"((?P<poll>\S+)\s+)"
"((?P<reach>\S+)\s+)"
"((?P<delay>\S+)\s+)"
"((?P<offset>\S+)\s+)"
"((?P<jitter>\S+)\s+)")
regex = re.compile(exp, re.MULTILINE)
r = regex.search(servers)
# did we find primary server?
if not r:
# we'll search again w/o "*" at beginning
exp = (" ((?P<remote>\S+)\s+)"
"((?P<refid>\S+)\s+)"
"((?P<st>\S+)\s+)"
"((?P<t>\S+)\s+)"
"((?P<when>\S+)\s+)"
"((?P<poll>\S+)\s+)"
"((?P<reach>\S+)\s+)"
"((?P<delay>\S+)\s+)"
"((?P<offset>\S+)\s+)"
"((?P<jitter>\S+)\s+)")
regex = re.compile(exp, re.MULTILINE)
r = regex.search(servers)
if not r:
return "-", "-"
data = r.groupdict()
# create a timestamp for last polling time
# this integer can have units: m = minutes, h = hours, d = days
units = ['m', 'h', 'd']
last_poll = data['when']
last_poll_unit = list(last_poll)[-1]
if "-" in list(last_poll):
return "-", "-"
# is there a unit character?
if last_poll_unit in units:
for case in switch(last_poll_unit):
if case('m'): # minutes
last_poll_s = int(last_poll[:-1]) * 60
break
if case('h'): # hours
last_poll_s = int(last_poll[:-1]) * 60 * 60
break
if case('d'): # days
last_poll_s = int(last_poll[:-1]) * 24 * 60 * 60
else:
last_poll_s = int(last_poll)
last_poll_time = (datetime.utcnow() - timedelta(seconds=last_poll_s)).isoformat()
# TODO: we were getting an error parsing 'reach'. This was a quick fix patch,
# but we really need to parse the ntpq results better.
# how often are we polling
try:
poll_s = int(data['poll'])
except:
poll_s = 64
# look at the "reach" to calculate a last success time
try:
reach = int(data['reach'], 8) # convert from Octal representation
except:
reach = int('377', 8)
# TODO END: Quick fix
# edge cases
if reach == 0:
last_success_time = "-"
elif reach == 255:
last_success_time = last_poll_time
# Else the "reach" field is an 8-bit set that holds 0's for unsuccessful
# polls (starting from the last_poll_s). We search from the LSB to
# the left for the first non-zero (i.e., successful poll).
# E.g., (see the linked article above), but if we've had 8 successful
# NTP requests, then reach = 1111 1111 (255 decimal, 377 octal). Now
# consider the next request is unsuccessful, the MSB is shifted out and
# reach = 1111 1110 (253 decimal, 376 octal). The last_successful poll
# would have been 1 polling period earlier (first non-zero bit from left).
# We use the "poll" field to tell how many seconds between polling then
# use the first non-zero bit position as the multiplier.
else:
last_success_s = (last_poll_s + __lowestSet(reach) * poll_s)
last_success_time = (datetime.utcnow() - timedelta(seconds=(last_success_s))).isoformat() + 'Z'
return last_poll_time, last_success_time
# find the lowest bit set in an int
# from https://wiki.python.org/moin/BitManipulation
def __lowestSet(int_type):
low = (int_type & -int_type)
lowBit = -1
while (low):
low >>= 1
lowBit += 1
return(lowBit)
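# --- Hedged sketch, not part of the original module ---
# Worked example of the "reach" arithmetic in __parse_ntpq: reach is an
# octal 8-bit shift register of poll outcomes, and the lowest set bit says
# how many polling periods ago the last successful poll happened.
def _last_success_seconds(reach_octal, last_poll_s, poll_s):
    reach = int(reach_octal, 8)
    if reach == 0:
        return None  # no successful poll in the window
    return last_poll_s + __lowestSet(reach) * poll_s

# e.g. reach "376" (binary 1111 1110) with 64 s polls and a poll 10 s ago:
# _last_success_seconds("376", 10, 64) == 74, one polling period further back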
| null |
ClockUtils.py
|
ClockUtils.py
|
py
| 8,241 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "subprocess.run",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "fileinput.input",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "Switch.switch",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 296,
"usage_type": "call"
}
] |
371221382
|
import cv2
import matplotlib.pyplot as plt
import numpy
import scipy.io
mat = scipy.io.loadmat('./devkit/cars_train_annos.mat')
annotations = mat['annotations'][0]
scaled_size = 100
img_file = './cars_train/00001.jpg'
original_img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
scaled_img = cv2.resize(original_img, (scaled_size, scaled_size))
bboxes = annotations[0]
x1 = int(bboxes[0][0][0] * (scaled_size / original_img.shape[1]))
y1 = int(bboxes[1][0][0] * (scaled_size / original_img.shape[0]))
x2 = int(bboxes[2][0][0] * (scaled_size / original_img.shape[1]))
y2 = int(bboxes[3][0][0] * (scaled_size / original_img.shape[0]))
bounded_img = cv2.rectangle(scaled_img, (x1, y1), (x2, y2), (255,0,0), 1)
plt.imshow(bounded_img, cmap='gray')
plt.show()
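# --- Hedged sketch, not part of the original file ---
# The four coordinate lines above repeat the same rescaling; a small helper
# keeps the width/height mapping in one place. Illustration only.
def scale_bbox(x1, y1, x2, y2, src_shape, dst_size):
    # src_shape is (height, width) as returned by img.shape;
    # dst_size is the side length of the square resized image
    sy = dst_size / src_shape[0]
    sx = dst_size / src_shape[1]
    return int(x1 * sx), int(y1 * sy), int(x2 * sx), int(y2 * sy)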
| null |
test_bbox.py
|
test_bbox.py
|
py
| 754 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scipy.io.io.loadmat",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
}
] |
85438145
|
from scipy import signal
from scipy.interpolate import interp1d
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from config import INTERP_TYPE, FILTER_CONF
def intervals_from_mask(mask, polarity=True):
"""
Convert bool mask to slices with given polarity
Ex.: [0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1] -> [(3,6), (8,9), (11,13)]
Resulting tuples are ready for slicing, i.e.
for i, j in intervals_from_mask(mask):
assert mask[i:j].all() == True
"""
if len(mask) == 0:
return []
if mask[0] == polarity:
start_idx = 0
else:
start_idx = 1
intervalends = np.nonzero(np.diff(mask) != False)[0]
intervalends = [-1] + list(intervalends) + [len(mask) - 1]
return [
(intervalends[i] + 1, intervalends[i + 1] + 1)
for i in range(start_idx, len(intervalends) - 1, 2)
]
def smooth_intervals(intervals, smallest_gap=30, smallest_length=20):
def merge_interval_with_next(intervals, i):
before = intervals[:i]
merged = (intervals[i][0], intervals[i + 1][1])
after = intervals[i + 2 :] if i + 2 < len(intervals) else []
return [*before, merged, *after]
while True:
gaps = [
intervals[i + 1][0] - intervals[i][1] for i in range(len(intervals) - 1)
]
if len(gaps) == 0 or min(gaps) > smallest_gap:
break
else:
intervals = merge_interval_with_next(intervals, np.argmin(gaps))
while True:
lengths = [i[1] - i[0] for i in intervals]
if len(lengths) == 0 or min(lengths) > smallest_length:
break
else:
intervals.pop(np.argmin(lengths))
return intervals
def normalize_array(a: np.ndarray) -> np.ndarray:
return a / np.std(a)
def timedelta2sec(dt: np.timedelta64):
return dt / np.timedelta64(10 ** 9, "ns")
def extract_time_series(df):
"""Convert dataframe (2 columns) to 2 numpy arrays"""
t = df.iloc[:, 0].to_numpy()
t = t - t[0]
y = df.iloc[:, 1].to_numpy()
mask = np.logical_not(np.isnan(y))
return (timedelta2sec(t[mask]), y[mask])
def to_uniform_grid(x, *y):
"""Interpolate signals defined on arbitrary grid to uniform
Uniform grid is inferred from x to have the same median step.
Args:
x (np.ndarray | pd.Series): original time stamps in numeric or timedelta64(ns)
*y (np.ndarray | pd.Series): arbitrary number of time series
OR x (pd.DataFrame): first column is treated as timestamps, others — as time series
Returns:
        x_grid, y_grid (np.ndarray): same signature as input, but interpolated on grid
"""
if not y:
df = x
x = df.iloc[:, 0]
y = []
for i in range(1, len(df.columns)):
y.append(df.iloc[:, i].to_numpy())
numpy_args = []
for s in (x, *y):
if isinstance(s, pd.Series):
numpy_args.append(s.to_numpy())
else:
numpy_args.append(s)
x, *ys = numpy_args
x = x - x[0]
if x.dtype == np.dtype("<m8[ns]"):
x = timedelta2sec(x)
else:
try:
x = x.astype(np.dtype("float64"))
except ValueError:
raise ValueError(
"Time stamps must be timedelta64 or be convertible to float64"
)
x_step = np.median(np.diff(x))
x_start = x[0]
x_stop = x[-1]
x_grid = np.arange(x_start, x_stop, x_step)
ys_grid = []
for y in ys:
mask = np.logical_not(np.isnan(y))
y_grid = interp1d(x[mask], y[mask], kind=INTERP_TYPE, fill_value="extrapolate")(
x_grid
)
ys_grid.append(y_grid)
return x_grid, ys_grid
def create_butterworth_hpf(cutoff_hz, slope_db_oct, timestamps, filter_out="sos"):
fs_hz = 1 / (timestamps[1] - timestamps[0])
nyq_hz = 0.5 * fs_hz
wp = cutoff_hz / nyq_hz # lower edge of the passband
k = 3 # more or less arbitrary, >=1
ws = wp / k
gpass = 1
gstop = slope_db_oct * k / 2 # /2 is purely empiric. don't judge.
N, Wn = signal.buttord(wp, ws, gpass, gstop)
# print('butterworth\'s filter N =', N)
return signal.butter(N, Wn, btype="highpass", output=filter_out)
def filter_array(t, x):
sos = create_butterworth_hpf(FILTER_CONF["cutoff"], FILTER_CONF["slope"], t)
return signal.sosfiltfilt(sos, x)
def plot_filter_response(cutoff_hz, slope_db_oct, fs_hz):
    # build a two-sample timestamp vector so the helper can infer fs from it
    b, a = create_butterworth_hpf(cutoff_hz, slope_db_oct, [0, 1 / fs_hz], filter_out="ba")
w, h = signal.freqz(b, a, fs=fs_hz, worN=np.logspace(-4, -2, 50))
plt.semilogx(w, 20 * np.log10(abs(h)))
plt.title("Butterworth filter frequency response")
plt.xlabel("Frequency [radians / second]")
plt.ylabel("Amplitude [dB]")
plt.margins(0, 0.1)
plt.grid(which="both", axis="both")
plt.axvline(cutoff_hz, color="green") # cutoff frequency
plt.show()
if __name__ == "__main__":
print(intervals_from_mask([0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1]))
print(smooth_intervals([(3, 6), (8, 9), (11, 13)]))
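    # --- Hedged sketch, not part of the original module ---
    # to_uniform_grid demo on synthetic, unevenly sampled data with a NaN
    # gap; assumes config.INTERP_TYPE is a kind interp1d accepts (e.g.
    # "linear"). Illustration only.
    t = np.array([0.0, 0.9, 2.1, 3.0, 4.2])
    y = np.array([0.0, 1.0, np.nan, 3.0, 4.0])
    t_grid, ys_grid = to_uniform_grid(t, y)
    print(t_grid, ys_grid[0])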
| null |
height_correction/filtering.py
|
filtering.py
|
py
| 5,230 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.nonzero",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.std",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.timedelta64",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "numpy.timedelta64",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.logical_not",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "numpy.dtype",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.logical_not",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "config.INTERP_TYPE",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "scipy.signal.buttord",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "scipy.signal.butter",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "config.FILTER_CONF",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "scipy.signal.sosfiltfilt",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "scipy.signal.freqz",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "numpy.logspace",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.semilogx",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "numpy.log10",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.margins",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
}
] |
250417890
|
"""A script for converting zero-filled examples into examples with raw morphophonemes
Raw morphophonemes are named according to a set of principal forms
(principal parts) which are assumed to reflect all morphophonemic
alternations in a lexeme. The script assumes that the examples within
a lexeme occur all in the same order as for their form.
In order to apply this script to another language, one must revise the
principal_set and the feat2mphons values. The code still relies on
the assumption that the stem comes as the first morph (and thus some
minor modifications would be required in order to handle prefixing
languages).
© Kimmo Koskenniemi, 2018. This is free software under GPL 3 license.
"""
import argparse
argparser = argparse.ArgumentParser(
"python3 zerofilled2raw.py",
description="Forms raw morphophonemes out of zero-filled morphs")
argparser.add_argument(
"input",
default="ksk-zerofilled.csv",
help="zero-filled example words as a CSV file")
argparser.add_argument(
"output",
default="ksk-raw-examp.csv",
help="example word with raw morhpophonemes as a CSV file")
argparser.add_argument(
"affix_info",
default="demo-affix-info.csv",
help="principal forms and morphophonemic affixes as a CSV file")
argparser.add_argument(
"-s", "--morph-separator",
default=".",
help="separator between morphs in the word form")
argparser.add_argument(
"-d", "--csv-delimiter",
default=",",
help="delimiter between the fields")
argparser.add_argument(
"-n", "--name-separator",
default=" ",
help="separator between morpheme names in the morpheme list")
argparser.add_argument(
"-z", "--zero-symbol",
default="Ø",
help="symbol inserted in word forms to align them")
argparser.add_argument(
"-v", "--verbosity",
default=0,
type=int,
help="level of diagnostic and debugging output")
args = argparser.parse_args()
import re
import csv
from collections import OrderedDict
from orderedset import OrderedSet
principal_set = OrderedSet()
""""Set of principal forms or principal parts, i.e. the forms which
uniquely determine the morphophonemic variations that may occur within
the stem """
feat2mphons = {}
# Read in the feature combinations of principal forms and
# the morphophonemic representations of affix features
with open(args.affix_info, "r") as afffil:
affrdr = csv.reader(afffil, delimiter=args.csv_delimiter)
for row in affrdr:
if row[1] == '+':
principal_set.add(row[0])
else:
feat2mphons[row[0]] = row[1]
#print("principal_set =", principal_set)####
#print("feat2mphons =", feat2mphons)####
# Read in the morpheme names and the zero-filled morphs
stem_morpheme_data = OrderedDict()
"""Indexed by stem morpheme name, value is a list of the original data
for that stem morpheme. Each value consists of a tuple of fields
(MORPHEMES, MORPHS, ALIGNED) in the original data."""
with open(args.input, "r") as infil:
rdr = csv.DictReader(infil, delimiter=args.csv_delimiter)
for row in rdr:
names = row["MORPHEMES"].strip()
orig_morphs = row["MORPHS"].strip()
zerof_morphs = row["ZEROFILLED"].strip()
if (not names) or (not zerof_morphs):
continue
name_lst = names.split(args.name_separator, maxsplit=1)
stem_name = name_lst[0]
form_name = " ".join(name_lst[1:]) if len(name_lst) > 1 else ""
zerof_morph_lst = zerof_morphs.split(args.morph_separator, maxsplit=1)
if stem_name not in stem_morpheme_data:
stem_morpheme_data[stem_name] = []
stem_morpheme_data[stem_name].append((form_name, orig_morphs, zerof_morph_lst))
ofil = open(args.output, "w")
writer = csv.DictWriter(ofil, fieldnames=["MORPHEMES", "MORPHS", "ZEROFILLED", "RAW"])
writer.writeheader()
for stem_morpheme, data_lst in stem_morpheme_data.items():
princ_zstem_lst =[]
# select the principal forms of this stem morpheme
for data in data_lst:
form_name, orig_morphs, zerof_morph_lst = data
if form_name in principal_set:
princ_zstem_lst.append(zerof_morph_lst[0])
# print("principal zero-filled stems:[", stem_morpheme, "]", princ_zstem_lst)###
# form the raw morphophonemes by combining corresponding symbols
l = len(princ_zstem_lst[0])
zstem_rawsym_lst = []
for i in range(l):
lst = []
for princ_zstem in princ_zstem_lst:
lst.append(princ_zstem[i])
# print(stem_morpheme, i, lst)###
raw_seq = "".join(lst)
if re.match(r"^(.)(\1)*$", raw_seq):
raw_sym = raw_seq[0] # abbreviate if all identical
else:
raw_sym = "{" + raw_seq + "}"
zstem_rawsym_lst.append(raw_sym)
zstem_pairsym_str = " ".join(zstem_rawsym_lst)
# Output the data augmented with the representation with raw morphophonemes
for data in data_lst:
form_name, orig_morphs, zerof_morph_lst = data
row["MORPHEMES"] = (stem_morpheme + " " + form_name).strip()
row["MORPHS"] = orig_morphs
orig_zerof_morphs = args.morph_separator.join(zerof_morph_lst)
row["ZEROFILLED"] = orig_zerof_morphs
raw_lst = [zstem_pairsym_str]
feat_lst = form_name.split(" ")
for feat in feat_lst:
raw_lst.append(feat2mphons[feat])
row["RAW"] = " ".join(raw_lst)
writer.writerow(row)
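# --- Hedged sketch, not part of the original script ---
# Worked example of the raw-morphophoneme assembly above: corresponding
# symbols of the zero-filled principal stems are stacked column by column,
# and a run of identical symbols is abbreviated to the single symbol.
# (re is already imported at the top of this script.)
def raw_morphophonemes(principal_stems):
    out = []
    for column in zip(*principal_stems):
        seq = "".join(column)
        out.append(seq[0] if re.match(r"^(.)(\1)*$", seq) else "{" + seq + "}")
    return out

# e.g. zero-filled stems "katØ" and "kadØ" -> ['k', 'a', '{td}', 'Ø']
assert raw_morphophonemes(["katØ", "kadØ"]) == ["k", "a", "{td}", "Ø"]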
| null |
zerofilled2raw.py
|
zerofilled2raw.py
|
py
| 5,450 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "orderedset.OrderedSet",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 125,
"usage_type": "call"
}
] |
557598148
|
from __future__ import division
import pygame
from pygame.locals import *
import map
import character
import math
import console
class GG2:
"""
Central class
"""
# This is to replace the gmk "all" and also to update everything.
GameObjectList = []
Xview = 0
Yview = 0
def __init__(self):
# All drawing should be done on the Surface object
self.Window = pygame.display.set_mode((800, 600), HWSURFACE | DOUBLEBUF)
self.Surface = self.Window
self.sysFont = pygame.font.SysFont("None", 30)
self.Wview = self.Window.get_width()
self.Hview = self.Window.get_height()
self.gameMap = map.Map(self)
self.Myself = character.Scout(self)
self.console = console.Console(self)
self.clock = pygame.time.Clock()
# text drawing is quite expensive, save it
self.fps = 0
self.fpstext = self.sysFont.render("0 FPS", 0, (130, 130, 130))
def update(self, frametime):
for obj in self.GameObjectList: obj.beginStep(frametime)
for obj in self.GameObjectList: obj.step(frametime)
for obj in self.GameObjectList: obj.endStep(frametime)
for obj in self.GameObjectList:
if obj.destroyInstance:
obj.destroy()
self.console.update()
def render(self):
# get info
self.Xview = self.Myself.x - self.Wview/2
self.Yview = self.Myself.y - self.Hview/2
# draw background
self.Surface.fill((245, 245, 235))
self.gameMap.draw()
for obj in self.GameObjectList: obj.draw()
# text drawing is quite expensive, save it
newfps = int(self.clock.get_fps())
if newfps != self.fps:
self.fps = newfps
self.fpstext = pygame.font.SysFont("None", 30).render("%d FPS" % self.fps, 0, (130, 130, 130))
self.Surface.blit(self.fpstext, (10, 10))
if self.console.show:
self.Surface.blit(self.console.imgText, (120, 10))
pygame.display.update()
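# --- Hedged sketch, not part of the original file ---
# GG2 only defines update/render; a minimal loop that drives them, assuming
# pygame (and its font module) was initialised before constructing GG2:
def run(game, fps_cap=60):
    running = True
    while running:
        # clock.tick returns the frame time in milliseconds
        frametime = game.clock.tick(fps_cap) / 1000.0
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
        game.update(frametime)
        game.render()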
| null |
gg2.py
|
gg2.py
|
py
| 1,823 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.display.set_mode",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "map.Map",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "character.Scout",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "console.Console",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 76,
"usage_type": "attribute"
}
] |
628916780
|
import requests
import json
import time
from bs4 import BeautifulSoup
from django.contrib.gis.geos import GEOSGeometry
from routepang.model.LocationModel import Location
from routepang.model.PreciousData import PreciousData
# Tourist spots (English names) matching the request
# fetches up to 60 results
# returns them as a JSON array
class LocationController:
def __init__(self):
self.google_api_key = PreciousData.mapKey
self.category = ["attraction", "food", "cafe"]
def getLocationList(self, request):
for i in range(3):
location_list = []
next_page_token = ""
            # keep paging and parsing until there is no next_page_token
while True:
                # the category is split into attraction / food / cafe
request_url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query=" + request + "+" + \
self.category[i] + "&key=" + self.google_api_key + "&pagetoken=" + next_page_token + \
"&language=ko"
req = requests.get(request_url)
html = req.text
soup = BeautifulSoup(html, 'html.parser')
                # refine the data into JSON form
json_data = json.loads(str(soup))
json_loaction_result = json_data["results"]
location_list = location_list + json_loaction_result
try:
next_page_token = json_data["next_page_token"]
                    # the last page has no next_page_token key,
                    # so a KeyError is raised
except KeyError:
next_page_token = "END"
if next_page_token == "END":
break
else:
time.sleep(2)
self.insertLocation(request=location_list, category=i+1)
# for i in location_list:
# print(i)
return
    # list of place names for Instagram crawling
    # no whitespace, so the names work for hashtag search
def getLocationNameList(self, request):
nameList = []
for i in request:
            # cast to string so replace() can be used
place = str(i["name"])
nameList.append(place.replace(" ", ""))
return nameList
    # takes a JSON array as the request
    # and saves the JSON-shaped data to the DB
def insertLocation(self, request, category):
for i in request:
name = i["name"]
            # check for duplicates by location name
if not Location.objects.filter(name=name).exists():
place_id = i["place_id"]
address = i["formatted_address"]
lon = i["geometry"]["location"]["lng"]
lat = i["geometry"]["location"]["lat"]
coordinates = GEOSGeometry('POINT(' + str(lon) + ' ' + str(lat) + ')')
                # category handling to be improved later
try:
image = i["photos"][0]["photo_reference"]
except KeyError:
image = "no image"
Location(place_id=place_id, address=address, name=name, coordinates=coordinates,
image=image, category=category).save()
return
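# --- Hedged sketch, not part of the original file ---
# The paging above is specific to Google Places text search: each page
# yields up to 20 results, and next_page_token only becomes valid after a
# short delay. A standalone version of that loop, assuming the same
# endpoint (api_key and query are caller-supplied):
def fetch_all_pages(query, api_key):
    results, token = [], ""
    while True:
        url = ("https://maps.googleapis.com/maps/api/place/textsearch/json"
               "?query=" + query + "&key=" + api_key + "&pagetoken=" + token)
        page = json.loads(requests.get(url).text)
        results += page.get("results", [])
        token = page.get("next_page_token")
        if not token:
            return results
        time.sleep(2)  # the token needs a moment before it becomes usable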
| null |
controller/LocalController.py
|
LocalController.py
|
py
| 3,357 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "routepang.model.PreciousData.PreciousData.mapKey",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "routepang.model.PreciousData.PreciousData",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "routepang.model.LocationModel.Location.objects.filter",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "routepang.model.LocationModel.Location.objects",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "routepang.model.LocationModel.Location",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.GEOSGeometry",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "routepang.model.LocationModel.Location",
"line_number": 99,
"usage_type": "call"
}
] |
463826422
|
# coding: utf-8
import os
import timeit
from collections import OrderedDict
import meshio
import numpy as np
from mgmetis import metis
__all__ = ['meshpart']
def meshpart(size, filename):
if size > 1:
paramesh(size, filename)
else:
seqmesh(filename)
def seqmesh(filename):
def load_gmsh_mesh(filename):
#mesh = meshio.gmsh.read(filename)
mesh = meshio.read(filename)
return mesh
def create_cell_nodeid(mesh):
cell_nodeid = []
if type(mesh.cells) == dict:
cell_nodeid = mesh.cells["triangle"]
elif type(mesh.cells) == list:
cell_nodeid = mesh.cells[1].data
for i in range(len(cell_nodeid)):
cell_nodeid[i].sort()
return cell_nodeid
def define_ghost_node(mesh, nodes):
ghost_nodes = [0]*len(nodes)
if type(mesh.cells) == dict:
for i, j in mesh.cell_data.items():
if i == "line":
ghost = j.get('gmsh:physical')
for i, j in mesh.cells.items():
if i == "line":
for k in range(len(j)):
for index in range(2):
if ghost[k] > 2:
ghost_nodes[j[k][index]] = int(ghost[k])
for i, j in mesh.cells.items():
if i == "line":
for k in range(len(j)):
for index in range(2):
if ghost[k] <= 2:
ghost_nodes[j[k][index]] = int(ghost[k])
elif type(mesh.cells) == list:
ghost = mesh.cell_data['gmsh:physical'][0]
for i in range(len(mesh.cells[0].data)):
for j in range(2):
if ghost[i] > 2:
ghost_nodes[mesh.cells[0].data[i][j]] = int(ghost[i])
for i in range(len(mesh.cells[0].data)):
for j in range(2):
if ghost[i] <= 2:
ghost_nodes[mesh.cells[0].data[i][j]] = int(ghost[i])
return ghost_nodes
def create_nodes(mesh):
nodes = []
nodes = mesh.points
return nodes
start = timeit.default_timer()
#load mesh
mesh = load_gmsh_mesh(filename)
#coordinates x, y of each node
nodes = create_nodes(mesh)
#nodes of each cell
cell_nodeid = create_cell_nodeid(mesh)
ghost_nodes = define_ghost_node(mesh, nodes)
if os.path.exists("mesh"+str(0)+".txt"):
os.remove("mesh"+str(0)+".txt")
with open("mesh"+str(0)+".txt", "a") as text_file:
text_file.write("elements\n")
np.savetxt(text_file, cell_nodeid, fmt='%u')
text_file.write("endelements\n")
with open("mesh"+str(0)+".txt", "a") as text_file:
text_file.write("nodes\n")
for i in range(len(nodes)):
for j in range(3):
text_file.write(str(nodes[i][j])+str(" "))
text_file.write(str(ghost_nodes[i]))
text_file.write("\n")
text_file.write("endnodes\n")
stop = timeit.default_timer()
print('Global Execution Time: ', stop - start)
def paramesh(size, filename):
def load_gmsh_mesh(filename):
mesh = meshio.read(filename)
return mesh
def create_cell_nodeid(mesh):
cell_nodeid = []
if type(mesh.cells) == dict:
cell_nodeid = mesh.cells["triangle"]
elif type(mesh.cells) == list:
cell_nodeid = mesh.cells[1].data
for i in range(len(cell_nodeid)):
cell_nodeid[i].sort()
return cell_nodeid
def define_ghost_node(mesh, nodes):
ghost_nodes = [0]*len(nodes)
if type(mesh.cells) == dict:
for i, j in mesh.cell_data.items():
if i == "line":
ghost = j.get('gmsh:physical')
for i, j in mesh.cells.items():
if i == "line":
for k in range(len(j)):
for index in range(2):
if ghost[k] > 2:
ghost_nodes[j[k][index]] = int(ghost[k])
for i, j in mesh.cells.items():
if i == "line":
for k in range(len(j)):
for index in range(2):
if ghost[k] <= 2:
ghost_nodes[j[k][index]] = int(ghost[k])
elif type(mesh.cells) == list:
ghost = mesh.cell_data['gmsh:physical'][0]
for i in range(len(mesh.cells[0].data)):
for j in range(2):
if ghost[i] > 2:
ghost_nodes[mesh.cells[0].data[i][j]] = int(ghost[i])
for i in range(len(mesh.cells[0].data)):
for j in range(2):
if ghost[i] <= 2:
ghost_nodes[mesh.cells[0].data[i][j]] = int(ghost[i])
return ghost_nodes
start = timeit.default_timer()
#load mesh
mesh = load_gmsh_mesh(filename)
#coordinates x, y of each node
    nodes = mesh.points  # create_nodes(mesh.points)
#nodes of each cell
cell_nodeid = create_cell_nodeid(mesh)
cell_nodeiddict = {tuple(cell_nodeid[0]): 0}
for i in range(1, len(cell_nodeid)):
cell_nodeiddict[tuple(cell_nodeid[i])] = i
#ghost nodes
ghost_nodes = define_ghost_node(mesh, nodes)
stopmesh = timeit.default_timer()
print("Reading mesh", stopmesh-start)
nbelements = len(cell_nodeid)
nbnodes = len(nodes)
print("Number of Cells : ", nbelements)
print("Number of Nodes : ", nbnodes)
#Partitioning mesh
if size > 1:
objval, epart, npart = metis.part_mesh_dual(size, cell_nodeid)
stopmetis = timeit.default_timer()
print("METIS partitionning in ", size, "partitions", stopmetis - stopmesh)
node_parts = OrderedDict()
cell_parts = OrderedDict()
node_part = [[] for i in range(size)]
cell_part = [[] for i in range(size)]
globnodetoloc = OrderedDict()
locnodetoglob = OrderedDict()
globcelltoloc = OrderedDict()
neighsub = [[] for i in range(size)]
halo_cellid = [[] for i in range(size)]
npart = [[] for i in range(nbnodes)]
cpart = [[] for i in range(nbelements)]
for i in range(nbelements):
for j in range(3):
if epart[i] not in npart[cell_nodeid[i][j]]:
npart[cell_nodeid[i][j]].append(epart[i])
for i in range(nbelements):
for j in range(3):
for k in range(len(npart[cell_nodeid[i][j]])):
if npart[cell_nodeid[i][j]][k] not in cpart[i]:
cpart[i].append(npart[cell_nodeid[i][j]][k])
cpart[i].sort()
#Create dict of nodes/cells for each partition
for i in range(nbelements):
for j in range(3):
k = cell_nodeid[i][j]
node_parts[epart[i], k] = [nodes[k][0], nodes[k][1], nodes[k][2], ghost_nodes[k]]
cell_parts[epart[i], i] = cell_nodeid[i]
#Create list of nodes/cells for each partition and local to global indexation
for i, j in node_parts.items():
node_part[i[0]].append(j)
globnodetoloc[i[0], i[1]] = len(node_part[i[0]])-1
locnodetoglob[i[0], len(node_part[i[0]])-1] = i[1]
if len(npart[i[1]]) > 1:
for index in range(len(npart[i[1]])):
if (npart[i[1]][index] not in neighsub[i[0]] and npart[i[1]][index] != i[0]):
neighsub[i[0]].append(npart[i[1]][index])
neighsub[i[0]].sort()
for i, j in cell_parts.items():
cell_part[i[0]].append(j)
globcelltoloc[i[0], len(cell_part[i[0]])-1] = i[1]
#globcelltoloc[i[0],i[1]] = len(cell_part[i[0]])
stopstruc = timeit.default_timer()
print("Create local structure for each proc", stopstruc - stopmetis)
for i in range(size):
for j in cell_part[i]:
if (len(npart[j[0]]) + len(npart[j[1]]) + len(npart[j[2]])) > 3:
halo_cellid[i].append(j)
haloint = OrderedDict()
haloext = OrderedDict()
for i in range(size):
for j in halo_cellid[i]:
cell = cell_nodeiddict.get(tuple(j))
for k in range(len(cpart[cell])):
if i != cpart[cell][k]:
haloint.setdefault((i, cpart[cell][k]), []).append(cell)
haloext.setdefault((cpart[cell][k], i), []).append(cell)
for i in range(size):
for j in range(len(cell_part[i])):
cell_part[i][j] = [globnodetoloc[i, cell_part[i][j][0]],
globnodetoloc[i, cell_part[i][j][1]],
globnodetoloc[i, cell_part[i][j][2]]]
stophalo = timeit.default_timer()
print("Creating halo structure", stophalo - stopstruc)
centvol = [[] for i in range(size)]
for i in range(size):
for j in range(len(neighsub[i])):
for k in range(len(haloext[(i, neighsub[i][j])])):
s_1 = cell_nodeid[haloext[(i, neighsub[i][j])][k]][0]
s_2 = cell_nodeid[haloext[(i, neighsub[i][j])][k]][1]
s_3 = cell_nodeid[haloext[(i, neighsub[i][j])][k]][2]
x_1 = nodes[s_1][0]
y_1 = nodes[s_1][1]
x_2 = nodes[s_2][0]
y_2 = nodes[s_2][1]
x_3 = nodes[s_3][0]
y_3 = nodes[s_3][1]
centvol[i].append([1./3 * (x_1 + x_2 + x_3), 1./3*(y_1 + y_2 + y_3),
(1./2) * abs((x_1-x_2)*(y_1-y_3)-(x_1-x_3)*(y_1-y_2))])
for i in range(size):
if os.path.exists("mesh"+str(i)+".txt"):
os.remove("mesh"+str(i)+".txt")
for i in range(size):
with open("mesh"+str(i)+".txt", "a") as text_file:
text_file.write("elements\n")
np.savetxt(text_file, cell_part[i], fmt='%u')
text_file.write("endelements\n")
text_file.write("nodes\n")
np.savetxt(text_file, node_part[i])
text_file.write("endnodes\n")
text_file.write("halosint\n")
for j in range(len(neighsub[i])):
for k in range(len(haloint[(i, neighsub[i][j])])):
text_file.write(str(haloint[(i, neighsub[i][j])][k]))
text_file.write("\n")
text_file.write("endhalosint\n")
text_file.write("halosext\n")
for j in range(len(neighsub[i])):
for k in range(len(haloext[(i, neighsub[i][j])])):
text_file.write(str(cell_nodeid[haloext[(i, neighsub[i][j])][k]][0])+" "+
str(cell_nodeid[haloext[(i, neighsub[i][j])][k]][1])+" "+
str(cell_nodeid[haloext[(i, neighsub[i][j])][k]][2]))
text_file.write("\n")
text_file.write("endhalosext\n")
text_file.write("centvol\n")
np.savetxt(text_file, centvol[i])
text_file.write("endcentvol\n")
text_file.write("globalcelltolocal\n")
for j in range(len(cell_part[i])):
text_file.write(str(globcelltoloc[i, j]))
text_file.write("\n")
text_file.write("endglobalcelltolocal\n")
text_file.write("localnodetoglobal\n")
for j in range(len(node_part[i])):
text_file.write(str(locnodetoglob[i, j]))
text_file.write("\n")
text_file.write("endlocalnodetoglobal\n")
text_file.write("neigh\n")
for j in range(len(neighsub[i])):
text_file.write(str(neighsub[i][j])+ " ")
text_file.write("\n")
for j in neighsub[i]:
text_file.write(str(len(haloint[(i, j)]))+ " ")
text_file.write("\n")
text_file.write("endneigh\n")
stopfile = timeit.default_timer()
print("save structures in files", stopfile - stophalo)
stop = timeit.default_timer()
print('Global Execution Time: ', stop - start)
| null |
manapy/ddm/meshpartitioning.py
|
meshpartitioning.py
|
py
| 12,132 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "meshio.read",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "meshio.read",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "mgmetis.metis.part_mesh_dual",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "mgmetis.metis",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "timeit.default_timer",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 342,
"usage_type": "call"
}
] |
344007750
|
from bs4 import BeautifulSoup as bs
from selenium import webdriver
import time
import pandas as pd
# setup
url = 'http://fantasy.nfl.com/league/2785979/history/2015/teamhome?teamId=1'
driver = webdriver.Firefox()
# to be populated
rosters = {}
pages = []
# sign in with selenium driver
driver.get(url)
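# long pause, presumably to give the NFL login page time to finish loading before filling the form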
time.sleep(20)
driver.find_elements_by_id('username')[0].send_keys('dreamteam14')
driver.find_elements_by_id('password')[0].send_keys('google.ca')
driver.find_elements_by_tag_name('button')[0].click()
time.sleep(3)
# get soup for each of the links 1-10
for i in range(1,11):
newurl = '{}{}'.format(url[:-1], i)
driver.get(newurl)
time.sleep(2)
pages.append(driver.page_source)
# get {team: [roster]} from each page, add to dict
for page in pages:
soup = bs(page, 'lxml')
teamName = soup.find('span', {'class':'label'}).text
pagePlayers = []
for player in soup.find_all('a', {'class': 'playerNameFull'}):
pagePlayers.append(player.text)
rosters[teamName] = pagePlayers
rosters = pd.DataFrame(rosters)
rosters.to_csv('LOGrosters.csv', index=False)
| null |
FantasyRosters/getFantasyRosters.py
|
getFantasyRosters.py
|
py
| 1,105 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 38,
"usage_type": "call"
}
] |
242383201
|
import argparse
#define arguments
parser = argparse.ArgumentParser(description="remove reads that map to contigs that aren't part of cycles")
parser.add_argument('ifn_pairedtable', metavar='<ifn_paired_table_reads>', type=str, help='Input paired table of reads')
parser.add_argument('ofn_pairedtable', metavar='<ofn_paired_table_reads_removed>', type=str, help='Output table with reads removed')
parser.add_argument('ofn_stats', metavar='<stats>', type=str, help='stats')
#parse arguments
args = parser.parse_args()
print('Now taking out irrelevant reads...')
otable = open(args.ofn_pairedtable,'w+')
line_count = 0
col_htable = {}
write_count = 0
index = 0
with open(args.ifn_pairedtable) as itable:
for read in itable:
if(line_count == 0):
header = read.split()
for col in header:
col_htable[col] = index
index += 1
otable.write(read)
line_count += 1
continue
line_count += 1
line = read.split()
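        # write the pair unless both contig ids start with 'C' (assumed here to mark cycle contigs)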
if line[col_htable['contig1']][0] != 'C' or line[col_htable['contig2']][0] != 'C':
otable.write(read)
write_count += 1
otable.close()
ofile = open(args.ofn_stats,'w+')
ofile.write('Write count %s \n' % write_count)
ofile.write('Total count %s \n' % line_count)
ofile.close()
print('Write count %s' % write_count)
print('Total count %s' % line_count)
# line_count includes the header line; report the kept reads as an actual percentage
print('Reads in cycle is %s percent of total reads' % (str(100.0 * write_count / max(line_count - 1, 1))))
| null |
md/Python/rem_noncyc_reads.py
|
rem_noncyc_reads.py
|
py
| 1,459 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
}
] |
447405634
|
import datetime
import streamlit as st
import pandas as pd
from tools.data_tools import *
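# st.cache with allow_output_mutation=True is used as persistent mutable storage:
# every call returns the same cached dict, so mutations survive Streamlit reruns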
@st.cache(allow_output_mutation=True)
def get_removed():
removing_dates = {'start_date':[],'end_date':[]}
return removing_dates
def removing_dates(df):
st.subheader("Removing data by period of time")
dates = df['start_date'].unique()
min_date = datetime.datetime.strptime(pd.to_datetime(
str(min(dates))).strftime('%Y-%m-%d'), '%Y-%m-%d').date()
max_date = datetime.datetime.strptime(pd.to_datetime(
str(max(dates))).strftime('%Y-%m-%d'), '%Y-%m-%d').date()
date = st.date_input("Select the dates you don't want to consider",
min_date, min_value=min_date, max_value=max_date)
date_start = to_start_date(date)
date_end = to_end_date(date)
st.write("The period selected is:")
st.write("Start date :", date_start)
st.write("End date :", date_end)
removed_header = st.empty()
removed_list = st.empty()
button1, button2, button3 = st.beta_columns([.2,.2,1])
add_button = button1.button("Add")
clear_button = button2.button("Clear")
remove_button = button3.button("Remove!")
if(add_button):
if((date_start not in get_removed()['start_date']) and (str(date_start) in dates)):
get_removed()['start_date'].append(date_start)
get_removed()['end_date'].append(date_end)
if(clear_button):
get_removed()['start_date'] = []
get_removed()['end_date'] = []
if(remove_button):
for start_date in get_removed()['start_date']:
df.drop(df[df['start_date']== str(start_date)].index, inplace=True)
save_database(df)
get_removed()['start_date'] = []
get_removed()['end_date'] = []
st.success("Periods removed! Please refresh above")
if(len(get_removed()['start_date']) > 0):
removed_header.subheader("Dates to be removed")
removed_list.dataframe(pd.DataFrame(get_removed()).sort_values(by="start_date"))
def page_database_config():
df = load_data()
st.title("Database Configuration")
st.header("Database Summary")
st.write("Number of registers:",len(df))
table,_, refresh = st.beta_columns([3,.2,1])
table.dataframe(df)
refresh.button("Refresh")
with st.beta_expander("Remove data"):
removing_dates(df)
| null |
database_config.py
|
database_config.py
|
py
| 2,453 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "streamlit.cache",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pandas.to_datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pandas.to_datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.date_input",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "streamlit.empty",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "streamlit.empty",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "streamlit.beta_columns",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "streamlit.success",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "streamlit.beta_columns",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "streamlit.beta_expander",
"line_number": 68,
"usage_type": "call"
}
] |
212723893
|
############################################################################
# #
# Copyright (c) 2022-2023 Carl Drougge #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import division, print_function
from itertools import cycle
import errno
import os
import sys
from accelerator.colourwrapper import colour
from accelerator.compat import PY2
from accelerator import mp
def split_colour(spec):
seq, _ = colour.pre_post(spec)
if seq == '':
return '', ''
assert seq.startswith('\x1b[')
assert seq.endswith('m')
seq = seq[2:-1]
assert '\x1b' not in seq
fg = []
bg = []
for part in seq.split(';'):
code = int(part.split(':', 1)[0])
if 30 <= code <= 38 or 90 <= code <= 97:
target = fg
elif 40 <= code <= 48 or 100 <= code <= 107:
target = bg
elif code not in (39, 49):
print("Sorry, %s can only use colours, not attributes" % (spec,), file=sys.stderr)
sys.exit(1)
target.append(part)
return ';'.join(fg), ';'.join(bg)
# a rather incomplete SGR parser that replaces colour resets by our
# selected colour (if we have one).
def collect_escseq(it, line_fg, line_bg):
chars = ['\x1b']
try:
c = next(it)
chars.append(c)
if c == '[':
while True:
c = next(it)
if c == 'm':
pieces = []
for piece in ''.join(chars)[2:].split(';'):
code = int(piece.split(':', 1)[0] or '0', 10)
if code == 0:
pieces = ['']
if line_fg:
pieces.append(line_fg)
if line_bg:
pieces.append(line_bg)
elif code == 39 and line_fg:
pieces.append(line_fg)
elif code == 49 and line_bg:
pieces.append(line_bg)
else:
pieces.append(piece)
return ('\x1b[', ';'.join(pieces), 'm',)
chars.append(c)
if c not in '0123456789;:':
break
except (StopIteration, ValueError):
pass
return chars
_RC_EPIPE = 124 # process return code used to signal we died with EPIPE
class Liner:
def __init__(self, process, saved_stdout):
self.process = process
self.saved_stdout = saved_stdout
def close(self):
os.dup2(self.saved_stdout, 1) # EOF for the liner process (after all children have also exited)
os.close(self.saved_stdout)
self.process.join()
if self.process.exitcode and self.process.exitcode != _RC_EPIPE:
raise Exception('Liner process exited with %s' % (self.process.exitcode,))
def enable_lines(colour_prefix, lined=True, decode_lines=False, max_count=None, after=0):
if lined:
colour._lined = True
pre_fg0, pre_bg0 = split_colour(colour_prefix + '/oddlines')
pre_fg1, pre_bg1 = split_colour(colour_prefix + '/evenlines')
if pre_fg0 == pre_bg0 == pre_fg1 == pre_bg1 == '' and max_count is None:
return
else:
pre_fg0 = pre_bg0 = pre_fg1 = pre_bg1 = ''
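	# run lineme, but treat a broken pipe (the reader went away) as a clean exit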
def wrap_EPIPE(*a):
try:
return lineme(*a)
except OSError as e:
if e.errno == errno.EPIPE:
exit(_RC_EPIPE)
raise
def lineme(lined, max_count, after):
os.close(liner_w)
colours = cycle([
(pre_fg0, pre_bg0),
(pre_fg1, pre_bg1),
])
if PY2:
in_fh = sys.stdin
errors = 'replace'
else:
in_fh = sys.stdin.buffer.raw
errors = 'surrogateescape'
if decode_lines:
if lined:
def decode_part(part):
res = []
for part in part.split('\\n'):
part = part.strip('\r')
if line_bg:
res.append('\x1b[K')
res.append(part)
if line_bg and '\r' not in part:
res.append('\x1b[K')
res.append('\n')
return ''.join(res[:-1]) # final \n is added in the main loop
else:
# When not lined the transform should be completely transparent
def decode_part(part):
return part.replace('\\n', '\n')
for line in in_fh:
line_fg, line_bg = next(colours)
line = line.strip(b'\r\n').decode('utf-8', errors)
has_cr = ('\r' in line)
if max_count is not None:
if line == '':
# Empty lines mark the end of output sections, so if we
# see one when showing the final context we stop.
if max_count == 0:
break
continue
if line[0] in 'MC': # don't count "I"nfo lines, only "M"atches and "C"ontext
if line[0] == 'M' and max_count:
max_count -= 1
elif max_count == 0 and after > 0:
after -= 1
line = line[1:]
if decode_lines:
line = '\\'.join(decode_part(part) for part in line.split('\\\\'))
if lined:
todo = iter(line)
data = []
if line_fg and line_bg:
data.append('\x1b[%s;%sm' % (line_fg, line_bg,))
elif line_bg:
data.append('\x1b[%sm' % (line_bg,))
elif line_fg:
data.append('\x1b[%sm' % (line_fg,))
if line_bg and not decode_lines:
data.append('\x1b[K') # try to fill the line with bg (if terminal does BCE)
for c in todo:
if c == '\x1b':
data.extend(collect_escseq(todo, line_fg, line_bg))
else:
data.append(c)
if line_bg and not has_cr and not decode_lines:
# the line might have been long, so if safe and needed try
# again to fill the line with bg (if terminal does BCE)
data.append('\x1b[K')
data.append('\x1b[m\n')
data = ''.join(data).encode('utf-8', errors)
else:
data = line.encode('utf-8', errors) + b'\n'
while data:
data = data[os.write(1, data):]
if max_count is not None and max_count == after == 0:
break
liner_r, liner_w = os.pipe()
liner_process = mp.SimplifiedProcess(
target=wrap_EPIPE,
args=(lined, max_count, after,),
stdin=liner_r,
name=colour_prefix + '-liner',
)
os.close(liner_r)
saved_stdout = os.dup(1)
os.dup2(liner_w, 1) # this is stdout for the parent process now
os.close(liner_w)
return Liner(liner_process, saved_stdout)
| null |
accelerator/shell/lined.py
|
lined.py
|
py
| 6,731 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "accelerator.colourwrapper.colour.pre_post",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "accelerator.colourwrapper.colour",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.dup2",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.close",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "accelerator.colourwrapper.colour._lined",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "accelerator.colourwrapper.colour",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "errno.EPIPE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.close",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "accelerator.compat.PY2",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "sys.stdin",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "os.write",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.pipe",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "accelerator.mp.SimplifiedProcess",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "accelerator.mp",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "os.close",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "os.dup",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "os.dup2",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.close",
"line_number": 212,
"usage_type": "call"
}
] |
380725390
|
import os
import argparse
import pdb
from models.cgan.model import Generator
from models.classifier.model import get_MNIST_model
import models.classifier.train
import models.classifier.evaluate
import numpy as np
import torch
from torch.autograd import Variable
import torch.utils.data
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--n_classes", type=int, default=10, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
parser.add_argument("--save_every", type=int, default=25, help="interval between saving the model")
# parser.add_argument("--checkpoint_dir", type=str, default='./checkpoints', help="Checkpoint directory")
# parser.add_argument("--run_name", required=True)
parser.add_argument("--use_cuda", type=bool, default=False, help="Use CUDA if available")
parser.add_argument("--load_checkpoint", type=bool, default=False, help="Run from checkpoint")
opt = parser.parse_args()
if opt.use_cuda:
opt.use_cuda = torch.cuda.is_available()
return opt
def load_generator(opt):
checkpoint_file = './models/cgan/checkpoints/test1.last.pth'
generator = Generator(opt)
checkpoint = torch.load(checkpoint_file)
generator.load_state_dict(checkpoint['g_model_state_dict'])
generator.eval()
return generator
def generate_images(generator, num_images, opt, out_loc='./data/mnist/generated'):
print('Beginning to generate images in {}...'.format(out_loc))
if os.path.exists(out_loc):
print('Out loc already exists...')
return
else:
os.makedirs(out_loc)
FloatTensor = torch.cuda.FloatTensor if opt.use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if opt.use_cuda else torch.LongTensor
batch_size = opt.batch_size
img_per_class = num_images // opt.n_classes
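    # generate an equal share of images per class label, in batches of at most opt.batch_size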
for i in range(opt.n_classes):
labels = np.array([i for _ in range(img_per_class)])
num_generated = 0
class_out_loc = os.path.join(out_loc, str(i))
os.makedirs(class_out_loc, exist_ok=True)
while num_generated < img_per_class:
# Sample noise
if num_generated + batch_size >= img_per_class:
this_batch_size = img_per_class - num_generated
else:
this_batch_size = batch_size
z = Variable(FloatTensor(np.random.normal(0, 1, (this_batch_size, opt.latent_dim))))
these_labels = Variable(LongTensor(labels[num_generated:num_generated+this_batch_size]))
gen_imgs = generator(z, these_labels)
            for k in range(gen_imgs.size()[0]):  # k, not i: avoid clobbering the class index
                out_file = os.path.join(class_out_loc, '{}.png'.format(k+num_generated))
                save_image(gen_imgs[k,:,:,:], out_file)
num_generated += len(these_labels)
print('Done generating images!')
def get_val_loader(opt):
return torch.utils.data.DataLoader(
datasets.MNIST(
"./data/mnist",
train=False,
download=True,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
),
),
batch_size=opt.batch_size,
shuffle=True,
)
def get_train_loader(num_true, num_gen, opt, gen_root='./data/mnist/generated'):
transform_list = [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
true = datasets.MNIST(
"./data/mnist",
train=True,
download=True,
transform=transforms.Compose(transform_list),
)
gen = datasets.ImageFolder(
gen_root,
transform=transforms.Compose([transforms.Grayscale(num_output_channels=1)] + transform_list)
)
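    # build a mixed training set: random subsets of real MNIST and of the GAN-generated images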
true_sample_indices = np.random.choice(len(true), num_true, replace=False)
gen_sample_indices = np.random.choice(len(gen), num_gen, replace=False)
true_sample = torch.utils.data.Subset(true, true_sample_indices)
gen_sample = torch.utils.data.Subset(gen, gen_sample_indices)
dataset = torch.utils.data.ConcatDataset([true_sample, gen_sample])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)
return dataloader
def train_test_classifier(train_loader, val_loader, opt):
classifier = get_MNIST_model(opt)
classifier, train_acc = models.classifier.train.train_model(classifier, train_loader, opt)
print('Done training, now evaluating...')
val_acc = models.classifier.evaluate.evaluate_model(classifier, val_loader, opt)
return train_acc, val_acc
def main():
true_data_sizes = [0, 64, 128, 256, 512, 1024]
generated_data_sizes = [0, 64, 128, 256, 512, 1024]
opt = parse_args()
generator = load_generator(opt)
if opt.use_cuda:
generator.cuda()
generate_images(generator, max(generated_data_sizes), opt)
val_loader = get_val_loader(opt)
for generated_data_size in generated_data_sizes:
for true_data_size in true_data_sizes:
if true_data_size == 0 and generated_data_size == 0:
continue
print('Running with {} true images and {} generated images'.format(true_data_size, generated_data_size))
train_loader = get_train_loader(true_data_size, generated_data_size, opt)
train_acc, val_acc = train_test_classifier(train_loader, val_loader, opt)
print('Final train accuracy: {:.3f}\tval accuracy: {:.3f}'.format(train_acc, val_acc))
print('')
if __name__ == '__main__':
main()
| null |
run_baseline.py
|
run_baseline.py
|
py
| 6,724 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "models.cgan.model.Generator",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.LongTensor",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Grayscale",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Subset",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Subset",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.ConcatDataset",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "models.classifier.model.get_MNIST_model",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "models.cgan.model.classifier.train.train_model",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "models.cgan.model.classifier",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "models.cgan.model",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "models.cgan.model.classifier.evaluate.evaluate_model",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "models.cgan.model.classifier",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "models.cgan.model",
"line_number": 124,
"usage_type": "name"
}
] |
241256279
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
import sys
import h5py  # used at module level by the HDF5 weight-loading helpers below
model_urls = {
'imagenet': 'http://webia.lip6.fr/~cadene/Downloads/inceptionv4-97ef9c30.pth'
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) # verify bias false
self.bn = nn.BatchNorm2d(out_planes, eps=0.001, momentum=0, affine=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_3a(nn.Module):
def __init__(self):
super(Mixed_3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_4a(nn.Module):
def __init__(self):
super(Mixed_4a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 64, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(64, 64, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(64, 96, kernel_size=(3,3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_5a(nn.Module):
def __init__(self):
super(Mixed_5a, self).__init__()
self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class Inception_A(nn.Module):
def __init__(self):
super(Inception_A, self).__init__()
self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_A(nn.Module):
def __init__(self):
super(Reduction_A, self).__init__()
self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(384, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
BasicConv2d(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_B(nn.Module):
def __init__(self):
super(Inception_B, self).__init__()
self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(224, 256, kernel_size=(7,1), stride=1, padding=(3,0))
)
self.branch2 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(192, 224, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(224, 224, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(224, 256, kernel_size=(1,7), stride=1, padding=(0,3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_B(nn.Module):
def __init__(self):
super(Reduction_B, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(256, 320, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_C(nn.Module):
def __init__(self):
super(Inception_C, self).__init__()
self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(self, num_classes=1001):
super(InceptionV4, self).__init__()
self.features = nn.Sequential(
BasicConv2d(3, 32, kernel_size=3, stride=2),
BasicConv2d(32, 32, kernel_size=3, stride=1),
BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
Mixed_3a(),
Mixed_4a(),
Mixed_5a(),
Inception_A(),
Inception_A(),
Inception_A(),
Inception_A(),
Reduction_A(), # Mixed_6a
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Reduction_B(), # Mixed_7a
Inception_C(),
Inception_C(),
Inception_C(),
nn.AvgPool2d(8, count_include_pad=False)
)
self.classif = nn.Linear(1536, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classif(x)
return x
def inceptionv4(pretrained=True):
model = InceptionV4()
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['imagenet']))
return model
######################################################################
## Load parameters from HDF5 to Dict
######################################################################
def load_conv2d(state_dict, name_pth, name_tf):
h5f = h5py.File('dump/InceptionV4/'+name_tf+'.h5', 'r')
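    # TF stores conv kernels as (H, W, in, out); permute to PyTorch's (out, in, H, W) layout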
state_dict[name_pth+'.conv.weight'] = torch.from_numpy(h5f['weights'][()]).permute(3, 2, 0, 1)
out_planes = state_dict[name_pth+'.conv.weight'].size(0)
state_dict[name_pth+'.bn.weight'] = torch.ones(out_planes)
state_dict[name_pth+'.bn.bias'] = torch.from_numpy(h5f['beta'][()])
state_dict[name_pth+'.bn.running_mean'] = torch.from_numpy(h5f['mean'][()])
state_dict[name_pth+'.bn.running_var'] = torch.from_numpy(h5f['var'][()])
h5f.close()
def load_linear(state_dict, name_pth, name_tf):
h5f = h5py.File('dump/InceptionV4/'+name_tf+'.h5', 'r')
state_dict[name_pth+'.weight'] = torch.from_numpy(h5f['weights'][()]).t()
state_dict[name_pth+'.bias'] = torch.from_numpy(h5f['biases'][()])
h5f.close()
def load_mixed_4a_7a(state_dict, name_pth, name_tf):
load_conv2d(state_dict, name_pth+'.branch0.0', name_tf+'/Branch_0/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch0.1', name_tf+'/Branch_0/Conv2d_1a_3x3')
load_conv2d(state_dict, name_pth+'.branch1.0', name_tf+'/Branch_1/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1.1', name_tf+'/Branch_1/Conv2d_0b_1x7')
load_conv2d(state_dict, name_pth+'.branch1.2', name_tf+'/Branch_1/Conv2d_0c_7x1')
load_conv2d(state_dict, name_pth+'.branch1.3', name_tf+'/Branch_1/Conv2d_1a_3x3')
def load_mixed_5(state_dict, name_pth, name_tf):
load_conv2d(state_dict, name_pth+'.branch0', name_tf+'/Branch_0/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1.0', name_tf+'/Branch_1/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1.1', name_tf+'/Branch_1/Conv2d_0b_3x3')
load_conv2d(state_dict, name_pth+'.branch2.0', name_tf+'/Branch_2/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch2.1', name_tf+'/Branch_2/Conv2d_0b_3x3')
load_conv2d(state_dict, name_pth+'.branch2.2', name_tf+'/Branch_2/Conv2d_0c_3x3')
load_conv2d(state_dict, name_pth+'.branch3.1', name_tf+'/Branch_3/Conv2d_0b_1x1')
def load_mixed_6(state_dict, name_pth, name_tf):
load_conv2d(state_dict, name_pth+'.branch0', name_tf+'/Branch_0/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1.0', name_tf+'/Branch_1/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1.1', name_tf+'/Branch_1/Conv2d_0b_1x7')
load_conv2d(state_dict, name_pth+'.branch1.2', name_tf+'/Branch_1/Conv2d_0c_7x1')
load_conv2d(state_dict, name_pth+'.branch2.0', name_tf+'/Branch_2/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch2.1', name_tf+'/Branch_2/Conv2d_0b_7x1')
load_conv2d(state_dict, name_pth+'.branch2.2', name_tf+'/Branch_2/Conv2d_0c_1x7')
load_conv2d(state_dict, name_pth+'.branch2.3', name_tf+'/Branch_2/Conv2d_0d_7x1')
load_conv2d(state_dict, name_pth+'.branch2.4', name_tf+'/Branch_2/Conv2d_0e_1x7')
load_conv2d(state_dict, name_pth+'.branch3.1', name_tf+'/Branch_3/Conv2d_0b_1x1')
def load_mixed_7(state_dict, name_pth, name_tf):
load_conv2d(state_dict, name_pth+'.branch0', name_tf+'/Branch_0/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1_0', name_tf+'/Branch_1/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch1_1a', name_tf+'/Branch_1/Conv2d_0b_1x3')
load_conv2d(state_dict, name_pth+'.branch1_1b', name_tf+'/Branch_1/Conv2d_0c_3x1')
load_conv2d(state_dict, name_pth+'.branch2_0', name_tf+'/Branch_2/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth+'.branch2_1', name_tf+'/Branch_2/Conv2d_0b_3x1')
load_conv2d(state_dict, name_pth+'.branch2_2', name_tf+'/Branch_2/Conv2d_0c_1x3')
load_conv2d(state_dict, name_pth+'.branch2_3a', name_tf+'/Branch_2/Conv2d_0d_1x3')
load_conv2d(state_dict, name_pth+'.branch2_3b', name_tf+'/Branch_2/Conv2d_0e_3x1')
load_conv2d(state_dict, name_pth+'.branch3.1', name_tf+'/Branch_3/Conv2d_0b_1x1')
def load():
state_dict={}
load_conv2d(state_dict, name_pth='features.0', name_tf='Conv2d_1a_3x3')
load_conv2d(state_dict, name_pth='features.1', name_tf='Conv2d_2a_3x3')
load_conv2d(state_dict, name_pth='features.2', name_tf='Conv2d_2b_3x3')
load_conv2d(state_dict, name_pth='features.3.conv', name_tf='Mixed_3a/Branch_1/Conv2d_0a_3x3')
load_mixed_4a_7a(state_dict, name_pth='features.4', name_tf='Mixed_4a')
load_conv2d(state_dict, name_pth='features.5.conv', name_tf='Mixed_5a/Branch_0/Conv2d_1a_3x3')
load_mixed_5(state_dict, name_pth='features.6', name_tf='Mixed_5b')
load_mixed_5(state_dict, name_pth='features.7', name_tf='Mixed_5c')
load_mixed_5(state_dict, name_pth='features.8', name_tf='Mixed_5d')
load_mixed_5(state_dict, name_pth='features.9', name_tf='Mixed_5e')
load_conv2d(state_dict, name_pth='features.10.branch0', name_tf='Mixed_6a/Branch_0/Conv2d_1a_3x3')
load_conv2d(state_dict, name_pth='features.10.branch1.0', name_tf='Mixed_6a/Branch_1/Conv2d_0a_1x1')
load_conv2d(state_dict, name_pth='features.10.branch1.1', name_tf='Mixed_6a/Branch_1/Conv2d_0b_3x3')
load_conv2d(state_dict, name_pth='features.10.branch1.2', name_tf='Mixed_6a/Branch_1/Conv2d_1a_3x3')
load_mixed_6(state_dict, name_pth='features.11', name_tf='Mixed_6b')
load_mixed_6(state_dict, name_pth='features.12', name_tf='Mixed_6c')
load_mixed_6(state_dict, name_pth='features.13', name_tf='Mixed_6d')
load_mixed_6(state_dict, name_pth='features.14', name_tf='Mixed_6e')
load_mixed_6(state_dict, name_pth='features.15', name_tf='Mixed_6f')
load_mixed_6(state_dict, name_pth='features.16', name_tf='Mixed_6g')
load_mixed_6(state_dict, name_pth='features.17', name_tf='Mixed_6h')
load_mixed_4a_7a(state_dict, name_pth='features.18', name_tf='Mixed_7a')
load_mixed_7(state_dict, name_pth='features.19', name_tf='Mixed_7b')
load_mixed_7(state_dict, name_pth='features.20', name_tf='Mixed_7c')
load_mixed_7(state_dict, name_pth='features.21', name_tf='Mixed_7d')
load_linear(state_dict, name_pth='classif', name_tf='Logits')
return state_dict
######################################################################
## Test
######################################################################
def test(model):
model.eval()
    from scipy import misc  # note: scipy.misc.imread was removed in SciPy 1.2+; newer code should use imageio.imread
    img = misc.imread('lena_299.png')
inputs = torch.zeros(1,299,299,3)
inputs[0] = torch.from_numpy(img)
inputs.transpose_(1,3)
inputs.transpose_(2,3)
# 1, 3, 299, 299
outputs = model.forward(torch.autograd.Variable(inputs))
h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
outputs_tf = torch.from_numpy(h5f['out'][()])
h5f.close()
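    # if the weights were ported correctly, the distance to the TF reference output should be ~0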
    outputs = torch.nn.functional.softmax(outputs, dim=1)  # dim=1: softmax over the class axis
print(torch.dist(outputs.data, outputs_tf))
return outputs
def test_conv2d(module, name):
#global output_tf
h5f = h5py.File('dump/InceptionV4/'+name+'.h5', 'r')
output_tf = torch.from_numpy(h5f['relu_out'][()])
output_tf.transpose_(1,3)
output_tf.transpose_(2,3)
h5f.close()
def test_dist(self, input, output):
print(name, torch.dist(output.data, output_tf))
module.register_forward_hook(test_dist)
def test_mixed_4a_7a(module, name):
test_conv2d(module.branch0[0], name+'/Branch_0/Conv2d_0a_1x1')
test_conv2d(module.branch0[1], name+'/Branch_0/Conv2d_1a_3x3')
test_conv2d(module.branch1[0], name+'/Branch_1/Conv2d_0a_1x1')
test_conv2d(module.branch1[1], name+'/Branch_1/Conv2d_0b_1x7')
test_conv2d(module.branch1[2], name+'/Branch_1/Conv2d_0c_7x1')
test_conv2d(module.branch1[3], name+'/Branch_1/Conv2d_1a_3x3')
######################################################################
## Main
######################################################################
if __name__ == "__main__":
import h5py
model = InceptionV4()
state_dict = load()
model.load_state_dict(state_dict)
# test_conv2d(model.features[0], 'Conv2d_1a_3x3')
# test_conv2d(model.features[1], 'Conv2d_2a_3x3')
# test_conv2d(model.features[2], 'Conv2d_2b_3x3')
# test_conv2d(model.features[3].conv, 'Mixed_3a/Branch_1/Conv2d_0a_3x3')
# test_mixed_4a_7a(model.features[4], 'Mixed_4a')
os.system('mkdir -p save')
torch.save(model, 'save/inceptionv4.pth')
torch.save(state_dict, 'save/inceptionv4_state.pth')
outputs = test(model)
| null |
model_zoo/inceptionv4/pytorch_load.py
|
pytorch_load.py
|
py
| 17,252 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "torch.utils.model_zoo.load_url",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "torch.utils.model_zoo",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "scipy.misc.imread",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "scipy.misc",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "torch.dist",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "torch.dist",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 432,
"usage_type": "call"
}
] |
38227380
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! code .
# ## Single component
#
# Basics
# +
import heatrapy as ht
import pandas as pd
import os
if os.path.exists("heat_transfer.txt"):
os.remove("heat_transfer.txt")
example = ht.single_object(amb_temperature=293, materials=('water',), borders=(1,21),materials_order=(0,),
dx=0.001, dt=0.001, file_name='heat_transfer.txt',boundaries=(0,0), Q=[], Q0=[],initial_state=False)
example.Cp[1],example.k[1],example.rho[1]
example.set_input_heat_transfer(1,700,1500)
example.set_radiation(1,0.9,293.15)
example.compute(timeInterval=30, write_interval=10, solver='implicit_k(x)')
df = pd.read_csv("heat_transfer.txt")
df=df.drop("heat[1](W)", axis=1)
df=df.drop("heat[-2](J)", axis=1)
df = df.set_index("time(s)")
df.plot(figsize=(15,8))
# -
# Two materials bonded together
# +
import heatrapy as ht
import pandas as pd
import utility
import os
if os.path.exists("example.txt"):
os.remove("example.txt")
example = ht.single_object(amb_temperature=293, materials=('Gd','Cu'), borders=(1,11,21),materials_order=(0,1),
dx=0.05, dt=0.1, file_name='example.txt',boundaries=(300,0), Q=[], Q0=[],initial_state=False)
example.compute(timeInterval=100000, write_interval=1000, solver='implicit_k(x)')
# +
df = pd.DataFrame({"time(s)":[0,100,200,300,1000],
"Heat_transfer_coefficient":[30,90,45,50,20],
"temparature":[500,600,650,450,300]
})
f_Heat_transfer_coefficient = utility.create_function(df["time(s)"], df["Heat_transfer_coefficient"])
f_temparature = utility.create_function(df["time(s)"], df["temparature"])
example.set_input_heat_transfer_function(1,f_Heat_transfer_coefficient,f_temparature)
# -
example.compute(timeInterval=100000, write_interval=1000, solver='implicit_k(x)')
df = pd.read_csv("example.txt")
df=df.drop("heat[1](W)", axis=1)
df=df.drop("heat[-2](J)", axis=1)
df = df.set_index("time(s)")
# df.plot(figsize=(15,8))
df_taisho = df.T
import numpy as np
df_taisho = df_taisho.set_index(pd.Series(np.arange(0,11,0.5)))
df_taisho = df_taisho[100000.000001]
df_taisho = df_taisho[0:10]
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set()  # use Seaborn's default style
fig = plt.figure(figsize=(5,4))  # set the figure size (width x height)
ax = fig.add_subplot(111)
df_taisho.plot()
ax.set_xticks(np.arange(0,11,1))  # set the x-axis tick positions
ax.set_yticks(np.arange(297, 301., 0.5))  # set the y-axis tick positions
# ## Two components
import heatrapy as ht
import pandas as pd
import utility
import os
# Properties of component 1
material_1 = "water"
length_1 = 50
# Properties of component 2
material_2 = "Gd"
length_2 = 50
# Common settings
# +
init_temperature = 293  # initial temperature
output_file_name_header = "2comp_test"
analysis_time = 1000
dt = 0.1
# -
# Build the analysis model
# + {"code_folding": []}
if os.path.exists(output_file_name_header+"_0.txt"):
os.remove(output_file_name_header+"_0.txt")
if os.path.exists(output_file_name_header+"_1.txt"):
os.remove(output_file_name_header+"_1.txt")
two_comp = ht.system_objects(number_objects=2, materials=(material_1, material_2),
objects_length=(length_1, length_2), amb_temperature=init_temperature, dx=0.01, dt=dt,
file_name=output_file_name_header,boundaries=((0,0), (1, 0)), initial_state=False, materials_path=False)
# -
# Set the boundary conditions (configure the non-adiabatic objects)
two_comp.objects[0].boundaries=(300,0)
for i in range(len(two_comp.objects)):
print("コンポーネント" + str(i) + " (前端温度、後端温度)=" + str(two_comp.objects[i].boundaries) + " ※0は断熱条件")
# Heat input via heat transfer
# +
# two_comp.set_input_heat_transfer((0,1),700,900)
# -
# Heat input via heat transfer, with time-dependent functions
df = pd.DataFrame({"time(s)":[0,100,200,300,1000],
"Heat_transfer_coefficient":[300,500,600,900,1200],
"temparature":[1000,1200,1500,1700,1800]
})
f_Heat_transfer_coefficient = utility.create_function(df["time(s)"], df["Heat_transfer_coefficient"])
f_temparature = utility.create_function(df["time(s)"], df["temparature"])
two_comp.set_input_heat_transfer_function((0,1),f_Heat_transfer_coefficient,f_temparature)
df.plot(x="time(s)")
# Add radiation
two_comp.set_radiation((0,1),0.9,293.15)
# Define the contact
two_comp.contactAdd(((0,11),(1,1),30000))
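# (assumed meaning of the tuple, judging from the 3-component example below:
# ((object 0, node 11), (object 1, node 1), contact heat transfer coefficient 30000))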
two_comp.contacts
# Solve the problem
two_comp.compute(timeInterval=analysis_time, write_interval=100, solver='implicit_k(x)')
# Post-processing
df_1 = pd.read_csv(output_file_name_header+"_0.txt",dtype=float).drop(["T[0] (K)"],axis=1)
df_2 = pd.read_csv(output_file_name_header+"_1.txt",dtype=float).drop(["T[0] (K)"],axis=1)
1493.298796
df_1
df_1.plot(x="time(s)",figsize=(15,8))
df_2.plot(x="time(s)",figsize=(15,8))
# ## 3 components
import heatrapy as ht
import pandas as pd
import os
output_file_name_header = "test_1"
three_comp = ht.system_objects(number_objects=3, materials=('Cu', 'AL','Cu'),
objects_length=(10, 10,20), amb_temperature=293, dx=0.001, dt=0.01,
file_name=output_file_name_header, initial_state=False,
boundaries=((2, 0), (3, 0),(0,0)), materials_path=False)
three_comp.objects[0].boundaries=(500,0)
for i in range(len(three_comp.objects)):
print("コンポーネント" + str(i) + " (前端温度、後端温度)=" + str(three_comp.objects[i].boundaries) + " ※0は断熱条件")
three_comp.contacts.add(((0,10),(1,1),3000))
three_comp.contacts.add(((1,10),(2,1),5000))
three_comp.compute(timeInterval=60, write_interval=100, solver='implicit_k(x)')
df_1 = pd.read_csv(output_file_name_header+"_0.txt",dtype=float).drop(["T[0] (K)"],axis=1)
df_2 = pd.read_csv(output_file_name_header+"_1.txt",dtype=float).drop(["T[0] (K)"],axis=1)
df_3 = pd.read_csv(output_file_name_header+"_2.txt",dtype=float).drop(["T[0] (K)"],axis=1)
df_1.plot(x="time(s)",figsize=(15,8))
df_2.plot(x="time(s)",figsize=(15,8))
df_3.plot(x="time(s)",figsize=(15,8))
| null |
test.py
|
test.py
|
py
| 6,512 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "heatrapy.single_object",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "heatrapy.single_object",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "utility.create_function",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "utility.create_function",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "heatrapy.system_objects",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "utility.create_function",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "utility.create_function",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "heatrapy.system_objects",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 220,
"usage_type": "call"
}
] |
89818221
|
############################ JSON representation ##############################
import webapp2
import json
from utilities.db_entities import *
class JsonHandler(webapp2.RequestHandler):
def get(self):
posts = db.GqlQuery("SELECT * FROM Posts ORDER BY time_date DESC")
posts_list = []
for p in posts:
p_dict = {}
p_dict["subject"] = p.subject
p_dict["content"] = p.content
p_dict["created"] = p.time_date.strftime('%d-%b-%Y %H:%M')
p_dict["writer_username"] = p.uid.username
posts_list.append(p_dict)
j = json.dumps(posts_list, indent=1)
self.response.headers["Content-Type"] = "application/json; charset=UTF-8"
self.response.out.write(j)
class JsonPermalinkHandler(webapp2.RequestHandler):
def get(self, post_id):
key = db.Key.from_path('Posts', int(post_id), parent=blog_key())
p = db.get(key)
if p:
p_dict = {}
p_dict["subject"] = p.subject
p_dict["content"] = p.content
p_dict["created"] = p.time_date.strftime('%d-%b-%Y %H:%M')
p_dict["writer_username"] = p.uid.username
j = json.dumps(p_dict, indent=1)
self.response.headers["Content-Type"] = "application/json; charset=UTF-8"
self.response.out.write(j)
else:
self.response.headers["Content-Type"] = "application/json; charset=UTF-8"
self.response.out.write("{}")
| null |
handlers/jsonHandler.py
|
jsonHandler.py
|
py
| 1,509 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "webapp2.RequestHandler",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 33,
"usage_type": "call"
}
] |
357309904
|
import cv2
import numpy as np
from webapp.GymTrainerComponents.PoseDetector import PoseDetector
vCap = cv2.VideoCapture("videos/v5.mp4")
pDetector = PoseDetector()
count = 0
direction = 0
while True:
success, img = vCap.read()
result, img = pDetector.getPose(img, False)
landMarksList = pDetector.getPoseLandMarks(img, result,False)
if len(landMarksList) != 0:
angelR=pDetector.getAngle(img,12,14,16,landMarksList)
angelL= pDetector.getAngle(img, 11, 13, 15, landMarksList)
        # approximate range of angles; convert them onto 0-100
perR=np.interp(angelR,(150,160),(0,100))
perL = np.interp(angelL, (130, 160), (0, 100))
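        # np.interp maps an angle inside the given range linearly onto [0, 100]
        # and clips values outside it, e.g. np.interp(155, (150, 160), (0, 100)) -> 50.0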
#print(angel)
#print(per)
if perL >= 90 and perL <= 100:
color = (0, 255, 0)
if direction == 0:
count += 0.5
direction = 1
if perL >= 0 and perL <= 10:
color = (0, 255, 0)
if direction == 1:
count += 0.5
direction = 0
if perR >= 90 and perR <= 100:
color = (0, 255, 0)
if direction == 0:
count += 0.5
direction = 1
if perR >= 0 and perR <= 10:
color = (0, 255, 0)
if direction == 1:
count += 0.5
direction = 0
print(count)
text= "Punch Tracker " #+ str(int(count))
cv2.putText(img, text, (10, 500), cv2.FONT_HERSHEY_PLAIN, 10,
(255, 0, 0), 25)
imS = cv2.resize(img, (1600, 1000))
cv2.imshow("image", imS)
cv2.waitKey(1)
| null |
webapp/GymTrainerComponents/BoxingTrainer.py
|
BoxingTrainer.py
|
py
| 1,618 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "webapp.GymTrainerComponents.PoseDetector.PoseDetector",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 48,
"usage_type": "call"
}
] |
194219016
|
'''
Req
url
base (taken from server object)
endpoint (from path)
    parameters (copy of parameter object)
name: parameter name
location: query,header,path,cookie
required: true/false
value: fuzzable variable
method (from path GET,PUT etc)
headers dict? list?
content (from requestbody and parameter?)
'''
import random
import string
import logging
import requests
import requests_oauthlib
import re
import sys
import model3 as model
import datetime
class Url:
def __init__(self, base, endpoint, parameter=''):
self.base = base
self.endpoint = endpoint
self.parameter = parameter
class Req:
# Bunch of values used in dummy value generation.
    # I wasn't a hundred percent sure about all the ways the different formats
    # might be spelled, so I created these lists
integers = ["integer", "Integer", "int", "Int", "INT"]
strings = ["string", "String", "STRING", "str"]
arrays = ["array", "Array", "ARRAY"]
objects = ["object", "obj", "Object"]
booleans = ["bool", "boolean", "Boolean"]
rand_range = [0, 100]
rand_string_length = 10
# location for the dummy value
DUMMY_FILE = 'seed/dummy'
# Sets requests allow_redirects parameters value
ALLOW_REDIRECTS = False
def __init__(self, url, parameters, method, header=None, content=None, security=None, responses=None):
self.url = url
self.parameters = parameters
self.method = method
        # Header is meant for headers that are not security- or parameter-related.
# Consider combining them
if header is None:
self.header = {}
else:
self.header = header
# requestBody object
self.content = content
self.security = security
self.responses = responses
self.par_query = {}
self.par_header = {}
self.par_cookie = {}
self.sec_query = {}
self.sec_header = {}
self.sec_cookie = {}
# Contains uploaded files
self.files = {}
#self.requestBody = None
def handle_object(self, param):
for par in param.value:
self.change_pars(par)
'''
if par.format_ in self.integers and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
par.value = random.randint(self.rand_range[0], self.rand_range[1])
logging.debug(" Value was changed to {}".format(par.value))
elif par.format_ in self.strings and par.options is None:
# elif isinstance(par.value, str) and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
par.value = ''.join(random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
logging.debug(" Value was changed to {}".format(par.value))
elif par.format_ in self.arrays:
# Going with the assumption that there are no other parameter within the array
# Parameter is in Parameter object form
for innerpar in par.value:
logging.info("innerpar: {}".format(innerpar))
logging.info(innerpar.print_info())
if innerpar.options is not None:
# parameter has options
innerpar.value = random.choice(innerpar.options)
else:
# There are no options
if innerpar.format_ in self.strings:
par.value = ''.join(
random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
elif innerpar.format_ in self.integers:
par.value = random.randint(self.rand_range[0], self.rand_range[1])
elif par.format_ in self.objects:
logging.debug("Parameter is an object")
self.handle_object(par)
elif par.options is not None:
logging.debug("Par options was not None and it wasn't an array")
par.value = random.choice(par.options)
else:
logging.error("Setting dummy values failed. Parameter was {} \r\nSetting value to 42".format(par.name))
par.value = 42
'''
return
def set_dummy_values(self):
for par in self.parameters:
self.change_pars(par)
'''
If par.value is None generates either a random int or string
logging.debug("Url is {}.".format(self.url.base[0] + self.url.endpoint))
logging.debug(" Parameter: {}".format(par.name))
# Integers
if par.format_ in self.integers and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
par.value = random.randint(self.rand_range[0], self.rand_range[1])
logging.debug(" Value was changed to {}".format(par.value))
# Strings
elif par.format_ in self.strings and par.options is None:
#elif isinstance(par.value, str) and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
par.value = ''.join(random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
logging.debug(" Value was changed to {}".format(par.value))
# Arrays
elif par.format_ in self.arrays:
# Going with the assumption that there are no other parameter within the array
# Parameter is in Parameter object form
innerpar = par.value
logging.info("Parameter is object: {} {}".format(par, innerpar))
logging.info("innerpar par: {}".format(innerpar))
if innerpar.options is not None:
#parameter has options
logging.info("Changing {} to {}".format(innerpar.value, random.choice(innerpar.options)))
innerpar.value = random.choice(innerpar.options)
else:
#There are no options
if innerpar.format_ in self.strings:
par.value = ''.join(
random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
elif innerpar.format_ in self.integers:
par.value = random.randint(self.rand_range[0], self.rand_range[1])
# Objects
elif par.format_ in self.objects:
logging.debug("Parameter is an object")
self.handle_object(par)
# Parameter that can only have specific options
elif par.options is not None:
logging.debug("Par options was not None and it wasn't an array")
par.value = random.choice(par.options)
# Booleans
elif par.format_ in self.booleans:
par.value = random.choice([True, False])
# Everything else
else:
logging.error("Setting dummy values failed. Parameter was {} \r\nSetting value to 42".format(par.name))
par.value = 42
'''
logging.info("RequestBody: {} ".format(self.content))
for o in self.content:
logging.info("Processing requestbody {} {}".format(o, o.type_))
for par in o.params:
self.change_pars(par)
'''
logging.info("Processing parameter {}".format(par.name))
if par.format_ in self.integers and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
par.value = random.randint(self.rand_range[0], self.rand_range[1])
logging.debug(" Value was changed to {}".format(par.value))
elif par.format_ in self.strings and par.options is None:
# elif isinstance(par.value, str) and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
par.value = ''.join(
random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
logging.debug(" Value was changed to {}".format(par.value))
elif par.format_ in self.arrays:
# Going with the assumption that there are no other parameter within the array
# Parameter is in Parameter object form
for innerpar in par.value:
logging.info("innerpar: {}".format(innerpar))
logging.info(innerpar.print_info())
if innerpar.options is not None:
# parameter has options
innerpar.value = random.choice(innerpar.options)
elif innerpar.format_ in self.objects:
# Handle objects
self.handle_object(innerpar)
else:
# There are no options
if innerpar.format_ in self.strings:
par.value = ''.join(
random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
elif innerpar.format_ in self.integers:
par.value = random.randint(self.rand_range[0], self.rand_range[1])
elif par.format_ in self.objects:
logging.debug("Parameter is an object")
self.handle_object(par)
elif par.options is not None:
logging.debug("Par options was not None and it wasn't an array")
par.value = random.choice(par.options)
else:
logging.error(
"Setting dummy values failed. Parameter was {} \r\nSetting value to 42".format(par.name))
par.value = 42
'''
# TODO Refactoring. Remove above comments when everything works
def change_pars(self, par):
try:
logging.info("Processing parameter {}".format(par.name))
if par.format_ in self.integers and par.options is None:
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
# TODO utilize format_detailed
par.value = random.randint(self.rand_range[0], self.rand_range[1])
logging.debug(" Value was changed to {}".format(par.value))
elif par.format_ in self.strings and par.options is None:
'''
                Cases where the string must conform to a specific format (or something similar) are handled here.
'''
logging.debug(" Parameter used to be {} with value {}".format(par.format_, par.value))
if par.format_detailed == "date" or par.format_detailed == "date-time":
# If the parameter should be in date/date-time format we give it current time
par.value = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
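                # e.g. '2019-06-04T09:30:15.123456+00:00'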
elif par.format_detailed == "binary":
# Api expects a file
par.value = par.name
if par.name not in self.files:
# Add dummy file to self.files
self.files[par.name] = open(self.DUMMY_FILE, 'rb')
else:
# If above ifs don't happen we create a random string
par.value = ''.join(
random.choices(string.ascii_uppercase + string.digits, k=self.rand_string_length))
logging.debug(" Value was changed to {}".format(par.value))
elif par.format_ in self.arrays:
try:
for innerpar in par.value:
self.change_pars(innerpar)
except TypeError:
# Happens when there is only one object in array
self.change_pars(par.value)
elif par.format_ in self.objects:
logging.debug("Parameter is an object")
for innerpar in par.value:
self.change_pars(innerpar)
#self.handle_object(par)
elif par.options is not None:
logging.debug("Par options was not None and it wasn't an array")
par.value = random.choice(par.options)
# Booleans
elif par.format_ in self.booleans:
par.value = random.choice([True, False])
else:
logging.error(
"Setting dummy values failed. Parameter was {} \r\nSetting value to 42".format(par.name))
par.value = 42
except AttributeError:
return
return
def connect_oauth(self, sec):
'''
Creates an oauth session based on the information found in the sec object
:param sec:
:return:
'''
# TODO add support for more oauth flows
# TODO implicit doesn't seem to work. Need to check if the issues is with petshop or with the implementation
# Implicit flow. Only using required parameters
scope = []
if sec.type_ == "oauth2" and sec.flows.get("implicit") is not None:
try:
for s, value in sec.flows.get("implicit").get("scopes").items():
scope.append(s)
auth_url = sec.flows.get("implicit").get("authorizationUrl")
except TypeError:
logging.exception("Scopes likely missing")
return None
except KeyError:
logging.exception("authorizationUrl likely missing")
return None
logging.info("oauth2 implicit flow")
logging.info(auth_url)
# TODO this part crashes if auth_url isn't using https
oauth = requests_oauthlib.OAuth2Session(scope=scope, client_id="sample-client-id")
url, state = oauth.authorization_url(auth_url)
logging.debug("state: {} url: {}".format(state,url))
return oauth, state
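    # Hypothetical usage sketch (the variable names are assumptions, not part
    # of this module):
    #   session, state = req.connect_oauth(sec)   # sec.type_ == "oauth2"
    #   if session is not None:
    #       code, resp, session = req.send(args, session=session)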
def send(self, args, session=None):
'''
Sends the request based on its values.
returns:
            status code + response object + session on success
            False/None values when sending failed for some reason
'''
# Requests library takes parameters as dict so we create one for each type
logging.debug("Preparing to send")
# Loop through all parameters
for p in self.parameters:
# Save the parameters to their appropriate places
logging.debug("GREP: {}".format(type(p.value)))
            # Value is occasionally a Parameter object. Get the value from the object if this is the case.
if isinstance(p.value, model.Parameter):
logging.debug("Values is Parameter {}".format(p.value.value))
val = p.value.value
else:
val = p.value
if p.location == "query":
self.par_query[p.name] = val
elif p.location == "cookie":
self.par_cookie[p.name] = val
elif p.location == "header":
self.par_header[p.name] = val
elif p.location == "path":
self.url.parameter = val
# Remove {par} from url Dont do this here
#self.url.endpoint = re.sub('{.*}', '', self.url.endpoint, flags=re.DOTALL)
else:
logging.error("Error parsing parameters. Location not found")
return False, False
# Security
# TODO add support to other methods
logging.info("Security {}".format(self.security))
if self.security is not None:
# Apikey security
for sec in self.security:
# logging.debug(sec.type_)
# logging.debug("{} {} {}".format(sec.type_, sec.name, sec.location))
if sec.type_ == "apiKey":
if not args.apikey:
logging.error("Security type is {} but no apikey provided.".format(sec.type_))
sys.exit(0)
if sec.location == "query":
self.sec_query[sec.name] = args.apikey
elif sec.location == "cookie":
self.sec_cookie[sec.name] = args.apikey
elif sec.location == "header":
self.sec_header[sec.name] = args.apikey
else:
logging.error("Security location is {}. It need to be query, cookie or header")
sys.exit(0)
elif sec.type_ == "oauth2":
# If current session object isn't an oauth2 object we replace it with one
if not isinstance(session, requests_oauthlib.oauth2_session.OAuth2Session):
session, state = self.connect_oauth(sec)
if session is None:
logging.error("Oauth connection failed")
sys.exit(0)
# Check if current session scope contains all scopes of the current sec
# If not create new session
scopes = sec.get_scopes()
scopes = scopes["implicit"]
logging.debug("Scopes: {}".format(scopes))
if not all(elem in session.scope for elem in scopes):
logging.debug("Scopes don't match {} {}".format(session.scope, scopes))
session, state = self.connect_oauth(sec)
#else:
#logging.error("Security type {} provided".format(sec.type_))
#logging.error("Only apikey security is currently supported. Shutting down program")
#sys.exit(0)
# RequestBody
# Choose which requestBody to use if it exists
# Todo This structure is dumb
request_body = None
'''
if len(self.content) == 1:
r = self.content[0]
if r.type_ == "application/json":
request_body = r
# This should be technically right.
# Alternative ways: Check header and x-* fields for headers
# Try without and after 415 (unsupported media type) do this
self.header["Content-Type"] = r.type_
request_body = request_body.json()
elif "www-form-urlencoded" in r.type_:
request_body = r
self.header["Content-Type"] = r.type_
request_body = request_body.form_url_encoded()
else:
self.header["Content-Type"] = self.content[0].type_#"application/json"
request_body = self.content[0]
elif self.content is not None:
for r in self.content:
if r.type_ == "application/json":
request_body = r
# This should be technically right.
# Alternative ways: Check header and x-* fields for headers
# Try without and after 415 (unsupported media type) do this
self.header["Content-Type"] = r.type_
request_body = request_body.json()
break
elif "www-form-urlencoded" in r.type_:
request_body = r
self.header["Content-Type"] = r.type_
request_body = request_body.form_url_encoded()
'''
if self.content is not None:
for r in self.content:
if r.type_ == "application/json":
request_body = r
# This should be technically right.
# Alternative ways: Check header and x-* fields for headers
# Try without and after 415 (unsupported media type) do this
self.header["Content-Type"] = r.type_
request_body = request_body.json()
break
elif "www-form-urlencoded" in r.type_:
request_body = r
self.header["Content-Type"] = r.type_
request_body = request_body.form_url_encoded()
else:
self.header["Content-Type"] = r.type_
request_body = None
# TODO rethink requests creation.
        # This is a bit of a hack. Multipart messaging doesn't work in requests if you set the header yourself.
        # It would be worth considering not setting the whole header manually, if that could be made to work.
try:
if self.header["Content-Type"] == "multipart/form-data":
del self.header["Content-Type"]
except KeyError:
logging.debug("Deleted Content-Type header due to it being multipart/form-data")
# Endpoints should be named in a format like /<name>/ but servers can be either
# <url>/ or <url>. In case the url ends with / it should be removed
if self.url.base[0].endswith('/'):
self.url.base[0] = self.url.base[0][:-1]
r_url = ''.join((self.url.base[0], self.url.endpoint))
d = {}
if args.cheader:
d[args.cheader[0]] = args.cheader[1]
# self.header = {**self.header, **d}
# If there is no Session then we create a new one
if session is None:
session = requests.Session()
# If proxy argument was given, sets up proxy
if args.proxy:
proxyDict = {
"http": args.proxy,
"https": args.proxy,
"ftp": args.proxy
}
else:
proxyDict = None
try:
# Replace {par} with urlparameter
if self.url.parameter is not None:
r_url = re.sub('{.*}', str(self.url.parameter), r_url, flags=re.DOTALL)
logging.debug("\nSending {} to {} ".format(self.method, r_url))
logging.debug("Header params: {}\n Cookie params: {}\n Query params: {}\n Path params: {}\n Content: {}\n Files: {}".format(
{**self.par_header, **self.sec_header, **self.header, **d}, {**self.par_cookie, **self.sec_cookie}, {**self.par_query, **self.sec_query}, self.url.parameter, request_body, self.files))
if self.method == "GET":
r = session.get(r_url,
headers={**self.par_header, **self.sec_header, **self.header, **d}, cookies={**self.par_cookie, **self.sec_cookie},
params={**self.par_query, **self.sec_query}, data=request_body, files=self.files,
allow_redirects=self.ALLOW_REDIRECTS, proxies=proxyDict)
elif self.method == "POST":
r = session.post(r_url,
headers={**self.par_header, **self.sec_header, **self.header,**d}, cookies={**self.par_cookie, **self.sec_cookie},
params={**self.par_query, **self.sec_query}, data=request_body, files=self.files,
allow_redirects=self.ALLOW_REDIRECTS, proxies=proxyDict)
elif self.method == "DELETE":
r = session.delete(r_url,
headers={**self.par_header, **self.sec_header, **self.header, **d}, cookies={**self.par_cookie, **self.sec_cookie},
params={**self.par_query, **self.sec_query}, data=request_body, files=self.files,
allow_redirects=self.ALLOW_REDIRECTS, proxies=proxyDict)
elif self.method == "PUT":
r = session.put(r_url,
headers={**self.par_header, **self.sec_header, **self.header, **d}, cookies={**self.par_cookie, **self.sec_cookie},
params={**self.par_query, **self.sec_query}, data=request_body, files=self.files,
allow_redirects=self.ALLOW_REDIRECTS, proxies=proxyDict)
else:
logging.error("Error sending request. Method not found")
return False, False, False
return r.status_code, r, session
except ValueError:
# Requests library checks that header values can't contain \n and such RFC 7230 protocol stuff
# This means it crashes when radamsa gives it some fancy values.
# This is good because now outgoing requests should be in proper form
logging.exception("ValueError in req.send")
except Exception:
logging.error("#################EXCEPTION START###################")
logging.exception(
"EXCEPTION HAPPENED\nSent {} to {}\nHeader params: {}\n Cookie params: {}\n Query params: {}\n Path params: {}\n Content: {}\n Files: {}".format(
self.method, r_url, {**self.par_header, **self.sec_header, **self.header, **d}, {**self.par_cookie, **self.sec_cookie},
{**self.par_query, **self.sec_query}, self.url.parameter, request_body, self.files))
logging.error("#################EXCEPTION END###################")
# Should we still return session if send crashes?
return None, None, session
def use_good_values(self, good_values):
'''
Changes parameter values to same ones that can be found from good_values
This method might be too "dumb" and need work later
:param good_values:
:return:
'''
logging.debug("Using use_good_values")
logging.debug("Self.parameters: {}\n".format(len(self.parameters)))
for par in self.parameters:
logging.debug(par.name)
logging.debug("Good values: {}".format(len(good_values)))
for par in good_values:
logging.debug(par.name)
# logging.debug(par.value)
#Parameters.
for p in self.parameters:
for good in good_values:
if p.name == good.name:
logging.debug("Replaced {} {} with {} {}".format(p.name, p.value, good.name, good.value))
p.value = good.value
elif p.format_ != "object" and good.format_ == "object":
self.good_object(p, good)
# Requestbody
for rbody in self.content:
if rbody.params is not None:
for p in rbody.params:
for good in good_values:
logging.debug("Replaced {} {} with {} {}".format(p.name, p.value, good.name, good.value))
if p.name == good.name:
p.value = good.value
elif p.format_ != "object" and good.format_ == "object":
self.good_object(p, good)
return
def good_object(self, p, g):
'''
Helper function for use_good_values.
:param p:
:param g:
:return:
'''
for par in g.value:
if par.name == p.name:
logging.debug("Replaced {} {} with {} {}".format(p.name, p.value, par.name, par.value))
p.value = par.value
elif par.format_ != "object" and p.format_ == "object":
self.good_object(par, g)
def return_from_nested(self, parameter, name_list):
'''
Helper function for return_pars. Appends parameters to name_list
:param parameter:
:param name_list:
:return:
'''
logging.debug("Req/return_pars/nested: {} added".format(parameter.name))
name_list.append(parameter)
try:
if parameter.format_ in self.arrays or parameter.format_ in self.objects:
# If parameter is array or obj it has other parameters within
# Except obj can contain a single array or obj value
#print(parameter, parameter)
for par in parameter.value:
if par.format_ in self.objects or par.format_ in self.arrays:
name_list = self.return_from_nested(par, name_list)
else:
logging.debug("Req/return_pars/nested: {} added".format(par.name))
name_list.append(par)
except TypeError:
# Invoked when obj contains a single array
logging.exception("Return_from_nested TypeError")
if parameter.format_ in self.objects or parameter.format_ in self.arrays:
name_list = self.return_from_nested(parameter.value, name_list)
except AttributeError:
# Invoked in cases when obj or array contain a single string.
# In case of petshop triggering this might be a bug
logging.exception("Return_from_nested AttributeError")
return name_list
return name_list
def return_pars(self):
'''
Returns parameter names of all parameters within self.parameters and self.content
Iterates through both and appends their names.
If parameter is either object or array uses return_from_nested to get the inner variables
'''
logging.debug("Req {}{}".format(self.url.base, self.url.endpoint))
ret = []
for par in self.parameters:
if par.format_ in self.objects or par.format_ in self.arrays:
ret = self.return_from_nested(par, ret)
else:
logging.debug("Req/return_pars: {} added".format(par.name))
ret.append(par)
for rbody in self.content:
for par in rbody.params:
if par.format_ in self.objects or par.format_ in self.arrays:
ret = self.return_from_nested(par, ret)
else:
logging.debug("Req/return_pars: {} added".format(par.name))
ret.append(par)
return ret
def valid_response_codes(self):
'''
Returns a list containing all the valid response codes for this request.
Return codes are integers due to requests library returning integer codes.
Non int values like default are appended as they are.
:return:
'''
rcodes = []
for resp in self.responses:
try:
rcodes.append(int(resp.code))
except ValueError:
rcodes.append(resp.code)
logging.debug("{}{} {}".format(self.url.base, self.url.endpoint, self.method))
logging.debug("valid_response_codes {}".format(rcodes))
return rcodes
| null |
req.py
|
req.py
|
py
| 31,495 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.info",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "random.choices",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "requests_oauthlib.OAuth2Session",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "model3.Parameter",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "requests_oauthlib.oauth2_session",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 483,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 539,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 606,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 658,
"usage_type": "call"
}
] |
549509363
|
import requests
from bs4 import BeautifulSoup
# import logging
# from logging.handlers import RotatingFileHandler
# import subprocess
# import pymongo
# from pymongo import MongoClient
from datetime import datetime
import re
import csv
# logger = logging.getLogger()
# logger.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# fh = RotatingFileHandler('/Users/Freddie/Documents/Python/python_practice_projects/currency_rates/debenham/loggin_info.log', maxBytes = 10*1024*1024, backupCount = 30)
# fh.setFormatter(formatter)
# fh.setLevel(logging.DEBUG)
# logger.addHandler(fh)
url = 'http://finance.debenhams.com/travel-money/exchange-rates/'
r = requests.get(url)
results = []
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'html.parser')
currency_table = soup.find_all('table', {'class': 'exchange'})
currencies = currency_table[0].find_all('tbody')[0]
currency_rows = currencies.find_all('tr')
regex = r'\/([\w\-\d]+)\.png'
pattern = re.compile(regex)
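    # e.g. pattern.search('/images/flags/usd.png').group(1) -> 'usd'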
for row in currency_rows:
currency_name_row = row.find('td')
        if currency_name_row is not None:
currency_name = currency_name_row.find('img').get_text()
currency_code_link = currency_name_row.find('img').get('src')
currency_code = pattern.search(currency_code_link).group(1)
standard_rates_list = row.find_all('td', {'class':"std"})
standard_sell_rate = standard_rates_list[0].text
standard_buy_rate = standard_rates_list[1].text
cardholder_rates_list = row.find_all('td', {'class':"deb"})
cardholder_sell_rate = cardholder_rates_list[0].text
cardholder_buy_rate = cardholder_rates_list[1].text
results.append({'currency_name': currency_name,
'currency_code': currency_code,
'standard_sell_rate': standard_sell_rate,
'standard_buy_rate': standard_buy_rate,
'cardholder_sell_rate': cardholder_sell_rate,
'cardholder_buy_rate': cardholder_buy_rate,
'datetime': datetime.now()})
else:
pass
# logging.info(str(len(results)) + ' data downloaded.')
# mongod = subprocess.Popen('mongod', shell = True)
# client = MongoClient('localhost', 27017)
# db = client.currency_rates
# debenham = db.debenham
# debenham.insert_many(results)
# mongod.terminate()
else:
# logging.debug("Can't download the page.")
pass
def dict_to_csv(path, data, fieldnames):
with open(path, "w", newline = '', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, delimiter = ',', fieldnames = fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
path = '../rates/debnham_rates.csv'
fieldnames = results[0].keys()
dict_to_csv(path, results, fieldnames)
| null |
debenham/debenham_rates.py
|
debenham_rates.py
|
py
| 2,676 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "csv.DictWriter",
"line_number": 76,
"usage_type": "call"
}
] |
117904183
|
import matplotlib.pyplot as plt
def plot_data(data, name, file):
plt.figure()
plt.plot(range(len(data)), data[:,0], label="Training Set")
plt.plot(range(len(data)), data[:,1], label="Validation Set")
plt.plot(range(len(data)), data[:,2], label="Test Set")
plt.legend(loc='best')
plt.title("%s History" %(name))
plt.xlabel("Epoch")
plt.ylabel("%s" %(name))
plt.grid()
plt.savefig('results/%s' %(file), bbox_inches='tight')
plt.close()
def visualize_weights(w, file):
fig, axes = plt.subplots(nrows=2, ncols=5)
fig.suptitle('Weight Visualization', size=20)
for i, ax in enumerate(axes.flat):
heatmap = ax.imshow(w[:,i].reshape((28,28)), cmap = plt.cm.coolwarm)
ax.set_title(i)
ax.set_axis_off()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(heatmap, cax=cbar_ax)
plt.savefig('results/%s' %(file), bbox_inches='tight')
plt.close()
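# Hypothetical usage (assumes MNIST-style weights of shape (784, 10) and a
# (num_epochs, 3) history array; neither is defined in this module):
#   plot_data(history, 'Loss', 'loss.png')
#   visualize_weights(w, 'weights.png')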
| null |
ECE521-master/Assignment 2/plot.py
|
plot.py
|
py
| 977 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
}
] |
491590686
|
import glob
import os
import self_awareness as sf
debug_mode = "off"
def get_absolute_path(*args):
fallback_output = "unable to get path"
log = ""
try:
relative_path = args[0]
if isinstance(relative_path, str):
fallback_output = relative_path
except Exception as e:
relative_path = "unable to get path"
log += str(e)
try:
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
absolute_path = os.path.join(__location__, relative_path)
res = absolute_path
except Exception as e:
res = fallback_output
log += str(e)
if debug_mode == "verbose":
print(log)
return res
sf.tests(get_absolute_path)
# takes file_type in the form like ".txt"
def recursive_file_search(*args):
try:
path, file_type = args
files = [f for f in glob.glob(path + "**/*" + file_type, recursive=True)]
if len(files) == 0:
path = get_absolute_path(path)
files = [f for f in glob.glob(path + "**/*" + file_type, recursive=True)]
sorted_files = sorted(files) # to make the order of the list deterministic (useful for tests etc)
res = sorted_files
except Exception as e:
res = []
if debug_mode == "verbose":
print(e)
return res
sf.tests(recursive_file_search)
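# Illustrative call: recursive_file_search("data/", ".txt") returns a sorted
# list of all .txt files under data/ (or [] if anything goes wrong).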
# try:
# import zlib
# compression = zipfile.zip_deflated
# print("no zlib")
# except:
# compression = zipfile.zip_stored
# print("compression", compression)
def extract_all_zips(*args):
try:
# a = 2/0
from zipfile import ZipFile
input_dir = args[0]
files_before = set(recursive_file_search(input_dir, ".txt"))
paths = recursive_file_search(input_dir, ".zip")
if len(paths) == 0:
if debug_mode != "off":
print("no zip files found in", input_dir)
else:
for p in paths:
with ZipFile(p, "r") as zip_ref:
zip_ref.extractall(input_dir)
if debug_mode != "off":
print("\nextracted", p)
files_after = set(recursive_file_search(input_dir, ".txt"))
res = files_after.difference(files_before)
except Exception as e:
res = set()
if debug_mode == "verbose":
print("Extract_all_zips raised an exception:", e)
return res
sf.tests(extract_all_zips)
def get_random_element_of_list(*args):
i_list = []
log = ""
try:
temp = args[0]
if isinstance(temp, list):
i_list = temp
except Exception as e:
i_list = []
log += str(e)
if len(i_list) > 0:
try:
import random
rnd_index = random.randint(0, len(i_list) - 1)
res = i_list[rnd_index]
except Exception as e:
res = i_list[0]
log += str(e)
else:
res = None
if debug_mode == "verbose":
print(log)
return res
sf.tests(get_random_element_of_list)
def copy_by_element(*args):
try:
source_list = args[0]
if isinstance(source_list, list):
new_list = []
for i in range(len(source_list)):
new_list.append(source_list[i])
res = new_list
else:
res = []
except Exception as e:
res = []
if debug_mode == "verbose":
print(e)
return res
sf.tests(copy_by_element)
| null |
helper_funcs.py
|
helper_funcs.py
|
py
| 3,521 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.realpath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "self_awareness.tests",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "self_awareness.tests",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "self_awareness.tests",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "self_awareness.tests",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "self_awareness.tests",
"line_number": 138,
"usage_type": "call"
}
] |
334226015
|
# encoding: UTF-8
import datetime
import time
import json
import os
import jpype
from jpype import *
from vnpy.trader.vtGateway import *
from vnpy.trader.vtFunction import getJsonPath
from language import text
from vnpy.api.geya.geyaApi import GeyaBase
SYMBOL_XAUUSD = 'XAUUSD'
SYMBOL_XAGUSD = 'XAGUSD'
SYMBOL_MAP = {}
SYMBOL_MAP['XAUUSD'] = SYMBOL_XAUUSD
SYMBOL_MAP['XAGUSD'] = SYMBOL_XAGUSD
# Cover-trade counterparty dictionary
COUNTERPARTYNAME = {}
COUNTERPARTYNAME['GOLDSACHS'] = 'GOLDSACHS'
COUNTERPARTYNAME['UBS_ZUR'] = 'UBS_ZUR'
TRADE_DIRECTION = {}
TRADE_DIRECTION['BUY'] = 'BUY'
TRADE_DIRECTION['SELL'] = 'SELL'
GEYA_PRODUCT_CLASS = 'P004'
# Default empty values
EMPTY_STRING = ''
EMPTY_UNICODE = u''
EMPTY_INT = 0
EMPTY_FLOAT = 0.0
# Direction constants
DIRECTION_NONE = u'none'
DIRECTION_LONG = u'多'
DIRECTION_SHORT = u'空'
DIRECTION_UNKNOWN = u'unknown'
DIRECTION_NET = u'net'
DIRECTION_SELL = u'sell' # IB interface
DIRECTION_COVEREDSHORT = u'covered short' # stock options
# Offset (open/close) constants
OFFSET_NONE = u'none'
OFFSET_OPEN = u'open'
OFFSET_CLOSE = u'close'
OFFSET_CLOSETODAY = u'close today'
OFFSET_CLOSEYESTERDAY = u'close yesterday'
OFFSET_UNKNOWN = u'unknown'
# Status constants
STATUS_NOTTRADED = u'pending'
STATUS_PARTTRADED = u'partial filled'
STATUS_ALLTRADED = u'filled'
STATUS_CANCELLED = u'cancelled'
STATUS_REJECTED = u'rejected'
STATUS_UNKNOWN = u'unknown'
# Product class constants
PRODUCT_EQUITY = u'equity'
PRODUCT_FUTURES = u'futures'
PRODUCT_OPTION = u'option'
PRODUCT_INDEX = u'index'
PRODUCT_COMBINATION = u'combination'
PRODUCT_FOREX = u'forex'
PRODUCT_UNKNOWN = u'unknown'
PRODUCT_SPOT = u'spot'
PRODUCT_DEFER = u'defer'
PRODUCT_NONE = 'none'
# Price type constants
PRICETYPE_LIMITPRICE = u'limit order'
PRICETYPE_MARKETPRICE = u'market order'
PRICETYPE_FAK = u'FAK'
PRICETYPE_FOK = u'FOK'
# Option types
OPTION_CALL = u'call'
OPTION_PUT = u'put'
# Exchange types
EXCHANGE_SSE = 'SSE' # Shanghai Stock Exchange
EXCHANGE_SZSE = 'SZSE' # Shenzhen Stock Exchange
EXCHANGE_CFFEX = 'CFFEX' # China Financial Futures Exchange
EXCHANGE_SHFE = 'SHFE' # Shanghai Futures Exchange
EXCHANGE_CZCE = 'CZCE' # Zhengzhou Commodity Exchange
EXCHANGE_DCE = 'DCE' # Dalian Commodity Exchange
EXCHANGE_SGE = 'SGE' # Shanghai Gold Exchange
EXCHANGE_INE = 'INE' # Shanghai International Energy Exchange
EXCHANGE_UNKNOWN = 'UNKNOWN' # unknown exchange
EXCHANGE_NONE = '' # empty exchange
EXCHANGE_HKEX = 'HKEX' # Hong Kong Stock Exchange
EXCHANGE_HKFE = 'HKFE' # Hong Kong Futures Exchange
EXCHANGE_SMART = 'SMART' # IB smart routing (stocks, options)
EXCHANGE_NYMEX = 'NYMEX' # IB futures
EXCHANGE_GLOBEX = 'GLOBEX' # CME electronic trading platform
EXCHANGE_IDEALPRO = 'IDEALPRO' # IB forex ECN
EXCHANGE_CME = 'CME' # CME exchange
EXCHANGE_ICE = 'ICE' # ICE exchange
EXCHANGE_LME = 'LME' # LME exchange
EXCHANGE_OANDA = 'OANDA' # OANDA forex market maker
EXCHANGE_OKCOIN = 'OKCOIN' # OKCoin bitcoin exchange
EXCHANGE_HUOBI = 'HUOBI' # Huobi bitcoin exchange
EXCHANGE_LHANG = 'LHANG' # Lhang bitcoin exchange
# Currency types
CURRENCY_USD = 'USD' # US dollar
CURRENCY_CNY = 'CNY' # Chinese yuan
CURRENCY_HKD = 'HKD' # Hong Kong dollar
CURRENCY_UNKNOWN = 'UNKNOWN' # unknown currency
CURRENCY_NONE = '' # empty currency
# Database
LOG_DB_NAME = 'VnTrader_Log_Db'
# Gateway types
GATEWAYTYPE_EQUITY = 'equity' # stocks, ETFs, bonds
GATEWAYTYPE_FUTURES = 'futures' # futures, options, precious metals
GATEWAYTYPE_INTERNATIONAL = 'international' # overseas markets
GATEWAYTYPE_BTC = 'btc' # bitcoin
GATEWAYTYPE_DATA = 'data' # data only (non-trading)
class GeyaGateway(VtGateway):
def __init__(self, eventEngine, gatewayName='Geya'):
"""Constructor"""
super(GeyaGateway, self).__init__(eventEngine, gatewayName)
        self.qryEnabled = False # periodic query
self.api = GeyaApi(self)
self.rmiIp = EMPTY_STRING
self.rmiPort = EMPTY_STRING
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
def connect(self):
"""连接"""
try:
f = file(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.LOADING_ERROR
self.onLog(log)
return
        # Parse the json settings file
setting = json.load(f)
try:
self.rmiIp = str(setting['rmiIp'])
self.rmiPort = str(setting['rmiPort'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.CONFIG_KEY_MISSING
self.onLog(log)
return
        # Initialize the API
        self.api.connect()
        self.writeLog(u'API initialized successfully')
        # Initialize and start the queries
self.initQuery()
#self.api.queryPrice()
# ----------------------------------------------------------------------
def writeLog(self, content):
"""发出日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.onLog(log)
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情,自动订阅全部行情,无需实现"""
pass
def sendOrder(self, orderReq):
"""发单"""
return self.api.sendOrder(orderReq)
# ----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.exit()
# ----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
self.qryFunctionList = [self.api.queryAllPrice]
self.startQuery()
# ----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
for function in self.qryFunctionList:
function()
# ----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
# ----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
class GeyaApi(GeyaBase):
"""Constructor"""
def __init__(self, gateway):
super(GeyaApi, self).__init__()
self.gateway = gateway
self.gatewayName = gateway.gatewayName
self.interval = 1
        self.localID = 0 # local order id
self.localSystemDict = {} # key:localID, value:systemID
self.systemLocalDict = {} # key:systemID, value:localID
self.workingOrderDict = {} # key:localID, value:order
self.reqLocalDict = {} # key:reqID, value:localID
self.cancelDict = {} # key:localID, value:cancelOrderReq
self.tradeID = 0
self.tickDict = {} # key:symbol, value:tick
# ----------------------------------------------------------------------
def connect(self):
"""初始化"""
self.init()
        # Push contract info; the cover-trade quote query keys one record per symbol and counterparty
for counterPartyName in COUNTERPARTYNAME.keys():
for symbol in SYMBOL_MAP.keys():
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = symbol
#contract.exchange = EXCHANGE_LHANG
contract.vtSymbol = '.'.join([symbol, counterPartyName])
#contract.vtSymbol = contract.symbol
                if symbol == SYMBOL_XAUUSD:
                    contract.name = u'Gold/USD spot'
                elif symbol == SYMBOL_XAGUSD:
                    contract.name = u'Silver/USD spot'
contract.size = 1
contract.priceTick = 0.01
contract.productClass = PRODUCT_SPOT
self.gateway.onContract(contract)
# ----------------------------------------------------------------------
def sendOrder(self, req):
"""发单"""
"""发送委托"""
# 发送限价委托
#s = SYMBOL_MAP_REVERSE[req.symbol]
if req.direction == DIRECTION_LONG:
tradeSide = 'BUY'
else:
tradeSide = 'SELL'
        #generate a 24-char serial number; rule: product class (e.g. G001) + 20-digit timestamp sequence
serial = GEYA_PRODUCT_CLASS+datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
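        # e.g. 'P004' + '20180604093015123456' -> a 24-character serial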
reqDate = datetime.datetime.now().strftime('%Y%m%d')
reqTime = datetime.datetime.now().strftime('%H:%M:%S')
        #req.multiplier: tolerated spread (slippage)
req.multiplier = '0'
reqID = self.createOrder(serial, GEYA_PRODUCT_CLASS, req.symbol, reqDate, reqTime, req.volume,
req.lastTradeDateOrContractMonth, tradeSide, req.price, req.multiplier)
self.localID += 1
localID = str(self.localID)
self.reqLocalDict[reqID] = localID
        # Push the order info
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = req.symbol
order.vtSymbol = req.symbol
#order.exchange = EXCHANGE_LHANG
#order.vtSymbol = '.'.join([order.symbol, order.exchange])
#order.orderID = localID
#order.vtOrderID = '.'.join([order.orderID, order.gatewayName])
order.orderID = serial
order.vtOrderID = '.'.join([order.orderID, order.gatewayName])
order.direction = req.direction
#order.offset = OFFSET_UNKNOWN
order.price = req.price
order.volume = req.volume
order.orderTime = datetime.datetime.now().strftime('%H:%M:%S.%f')[:-3]
order.status = STATUS_UNKNOWN
self.workingOrderDict[localID] = order
self.gateway.onOrder(order)
        # Return the order id
return order.vtOrderID
# ----------------------------------------------------------------------
def queryPrice(self):
"""查询最优平盘额度"""
for s in SYMBOL_MAP.keys():
#self.getDepth(s, self.gateway.rmiIp, self.gateway.rmiPort) #查询平盘全部额度
for d in TRADE_DIRECTION.keys():
self.getTicker(s, d, self.gateway.rmiIp, self.gateway.rmiPort) #查询平盘最优额度,考虑买卖方向
# ----------------------------------------------------------------------
def queryAllPrice(self):
"""查询全部平盘额度"""
for s in SYMBOL_MAP.keys():
self.getDepth(s, self.gateway.rmiIp, self.gateway.rmiPort)
# ----------------------------------------------------------------------
def onGetTicker(self, data, req, reqID):
"""查询行情回调"""
price = data['price'].doubleValue()#平盘价格
tradeLimitAmount = data['tradeLimitAmount'].doubleValue()#平盘额度
params = req['params']
symbol = SYMBOL_MAP[params['symbol']]
direction = params['direction']
if symbol not in self.tickDict:
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = symbol
tick.vtSymbol = symbol
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
if direction == 'BUY':
tick.bidPrice1 = price
tick.bidVolume1 = tradeLimitAmount
tick.askPrice1 = EMPTY_FLOAT
tick.askVolume1 = EMPTY_INT
elif direction == 'SELL':
tick.askPrice1 = price
tick.askVolume1 = tradeLimitAmount
tick.bidPrice1 = EMPTY_FLOAT
tick.bidVolume1 = EMPTY_INT
tick.lastPrice = price
tick.volume = tradeLimitAmount
tick.exchange = EMPTY_STRING
tick.openInterest = EMPTY_INT
tick.time = datetime.datetime.now().strftime('%H%M%S')
tick.date = datetime.datetime.now().strftime('%Y%m%d')
tick.openPrice = EMPTY_FLOAT
tick.highPrice = EMPTY_FLOAT
tick.lowPrice = EMPTY_FLOAT
tick.preClosePrice = EMPTY_FLOAT
tick.upperLimit = EMPTY_FLOAT
tick.lowerLimit = EMPTY_FLOAT
self.gateway.onTick(tick)
# Callback for the all-quota auto-cover query; the incoming data holds a list ---------------------------------------
def onGetDepth(self, data, req, reqID):
"""自动平盘查询全部额度回调"""
params = req['params']
symbol = SYMBOL_MAP[params['symbol']]
if data['resList'].size() == 0: # empty query result
return
for ele in data['resList']:
counterPartyName = ele.getCounterPartyName() # cover counterparty
vtSymbol = symbol+"."+counterPartyName # ensures uniqueness
if vtSymbol not in self.tickDict:
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = symbol
tick.vtSymbol = vtSymbol
self.tickDict[vtSymbol] = tick  # key by vtSymbol so the lookup below succeeds
else:
tick = self.tickDict[vtSymbol]
tick.bidPrice1 = ele.getBid().doubleValue()
tick.bidVolume1 = ele.getBidAmount().doubleValue()
tick.askPrice1 = ele.getAsk().doubleValue()
tick.askVolume1 = ele.getAskAmount().doubleValue()
now = datetime.datetime.now()
tick.time = now.strftime('%H:%M:%S.%f')[:-3]
tick.date = now.strftime('%Y%m%d')
tick.askPrice5 = counterPartyName
self.gateway.onTick(tick)
# ----------------------------------------------------------------------
#An FX cover trade may fill in multiple lots when no counterparty is specified; data may hold several trade records
def onCreateTrade(self, data, req, reqID):
# Create trade data objects
details = data['details'] # auto-cover trade records
for ele in details:
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = data['exnm']
trade.exchange = EMPTY_STRING
trade.vtSymbol = data['exnm']
trade.orderID = data['trsn']
trade.vtOrderID = self.gatewayName+"."+data['trsn']
trade.direction = data['direction']
trade.offset = EMPTY_UNICODE
trade.price = ele.getExpc().doubleValue()
trade.volume = ele.getLamt().doubleValue() # take the left position by default; convert BigDecimal to double (vnpy has no BigDecimal) for later computation
trade.tradeTime = ele.getTrtm()
trade.tradeID = data['trsn'] + ele.getPpds() # trade ID rule: order serial + counterparty
self.gateway.onTrade(trade)
def onCreateOrder(self, data, req, reqID):
"""发单回调"""
localID = self.reqLocalDict[reqID]
#systemID = data['id']
#self.localSystemDict[localID] = systemID
#self.systemLocalDict[systemID] = localID
# Cancel order
#if localID in self.cancelDict:
# req = self.cancelDict[localID]
# self.cancel(req)
# del self.cancelDict[localID]
# Push order info
order = self.workingOrderDict[localID]
if data['code'] == '00000':
order.status = STATUS_NOTTRADED
self.gateway.onOrder(order)  # an order status update belongs on onOrder, not onTrade
def exit(self):
pass
| null |
vnpy/trader/gateway/geyaGateway/geyaGateway.py
|
geyaGateway.py
|
py
| 15,616 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "vnpy.trader.vtFunction.getJsonPath",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "language.text.LOADING_ERROR",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "language.text",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "language.text.CONFIG_KEY_MISSING",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "language.text",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "vnpy.api.geya.geyaApi.GeyaBase",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 405,
"usage_type": "attribute"
}
] |
306902335
|
try:
import config_local as config
except ImportError:
import config
from datetime import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.orm import sessionmaker, scoped_session, relationship, backref
engine = create_engine(config.DB_URI, echo=False)
db_session = scoped_session(sessionmaker(bind=engine,
autocommit = False,
autoflush = False))
Base = declarative_base()
Base.query = db_session.query_property()
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(), nullable=False)
fb_id = Column(String(), nullable=False, index=True, unique=True)
mobile = Column(String(), nullable=False)
@staticmethod
def phone_lookup(user_id_list):
phone_list = []
for user_id in user_id_list:
user = db_session.query(User).filter_by(id=user_id).first()
phone_list.append(user.mobile)
return phone_list
@staticmethod
def lookup(facebook_id):
user = db_session.query(User).filter_by(fb_id=facebook_id).first()
if user is not None:
return user.id
else:
return None
@staticmethod
def register(facebook_name, facebook_id, mobile):
new_user = User(name=facebook_name, fb_id=facebook_id, mobile=mobile)
db_session.add(new_user)
db_session.commit()
return new_user.id
class Ridership(Base):
__tablename__ = "rider"
ride_id = Column(Integer(), ForeignKey('ride.id'), primary_key=True)
user_id = Column(Integer(), ForeignKey('user.id'), index=True, primary_key=True)
@staticmethod
def get_twil_users(ride):
twil_user_list = db_session.query(Ridership).filter_by(ride_id=ride).all()
user_ids_list = []
for user in twil_user_list:
user_ids_list.append(user.user_id)
return user_ids_list
@staticmethod
def join_ride(ride, user):
ridership_check = db_session.query(Ridership).\
filter_by(ride_id=ride, user_id=user).first()
import pprint
if ridership_check is not None:
pprint.pprint("Rider already registered for this ride")
else:
new_rider = Ridership(ride_id=ride, user_id=user)
db_session.add(new_rider)
db_session.commit()
pprint.pprint("Rider added")
return True
@staticmethod
def my_rides(user):
all_rides = db_session.query(Ridership).filter_by(user_id=user).all()
return all_rides
@staticmethod
def remove_rider(user, ride):
leave_ride = db_session.query(Ridership).\
filter_by(ride_id=ride, user_id=user).first()
db_session.delete(leave_ride)
db_session.commit()
import pprint
pprint.pprint("Rider removed")
class Ride(Base):
__tablename__ = "ride"
id = Column(Integer, primary_key=True)
creator_id = Column(Integer(), ForeignKey('user.id'))
ride_name = Column(String(), nullable=False)
ride_datetime = Column(DateTime(), nullable=False, index=True)
route_location = Column(String(), nullable=False)
route_coords = Column(String(), nullable=False)
riders = relationship('User', secondary=Ridership.__table__, backref="ride")
@staticmethod
def get_full_rides(ride_id):
my_rides = db_session.query(Ride).filter_by(id=ride_id)
return my_rides
@staticmethod
def store(c_id, r_name, r_datetime, r_location, r_coords):
new_ride = Ride(creator_id=c_id, ride_name=r_name, ride_datetime=r_datetime, route_location=r_location, route_coords=r_coords)
db_session.add(new_ride)
db_session.commit()
return new_ride.id
# working on this for Twilio page
@staticmethod
def todays_rides(today, tomorrow):
rides_today = db_session.query(Ride).filter(Ride.ride_datetime.between(today, tomorrow)).all()
rides_list = []
if rides_today != []:
for ride in rides_today:
rides_list.append(ride)
return rides_list
else:
return False
@staticmethod
def future_rides_lookup(search_date, next_date):
future_rides = db_session.query(Ride).filter(Ride.ride_datetime.between(search_date, next_date)).all()
if future_rides != []:
return future_rides
else:
return False
def create_tables():
Base.metadata.create_all(engine)
if __name__ == "__main__":
create_tables()
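# A minimal usage sketch (hypothetical values; assumes the engine behind
# config.DB_URI is reachable):
# uid = User.register("Ada Lovelace", "fb_123", "5550100")
# rid = Ride.store(uid, "Morning loop", datetime(2020, 6, 1, 8, 0),
#                  "Golden Gate Park", "37.77,-122.45")
# Ridership.join_ride(rid, uid)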
| null |
model.py
|
model.py
|
py
| 4,719 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "config.DB_URI",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.scoped_session",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 102,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "{'pprint': 'pprint'}.__table__",
"line_number": 109,
"usage_type": "attribute"
}
] |
569380448
|
import A1_image_filtering
import cv2
import numpy as np
import time
IMAGE_FILE_NAME_LIST = ["lenna.png", "shapes.png"]
def compute_image_gradient(img):
filtered_img = A1_image_filtering.cross_correlation_2d(img,A1_image_filtering.get_gaussian_filter_2d(7,1.5))
sobel_xx = [[-1, 0, 1]]
sobel_xy = [[1], [2], [1]]
sobel_yx = [[1, 2, 1]]
sobel_yy = [[-1], [0], [1]]
sobel_x_img = A1_image_filtering.cross_correlation_1d(filtered_img, sobel_xy)
sobel_x_img = A1_image_filtering.cross_correlation_1d(sobel_x_img, sobel_xx)
sobel_y_img = A1_image_filtering.cross_correlation_1d(filtered_img, sobel_yy)
sobel_y_img = A1_image_filtering.cross_correlation_1d(sobel_y_img, sobel_yx)
dir = np.arctan2(sobel_y_img, sobel_x_img)
mag = np.array(np.sqrt(np.square(sobel_x_img)+np.square(sobel_y_img)))
#print(np.sum(np.sqrt(np.square(sobel_x_img) + np.square(sobel_y_img)) >= 256))
return mag,dir
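# mag is the L2 gradient magnitude; dir is the gradient angle in radians
# as returned by np.arctan2 (range [-pi, pi]).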
def non_maximum_suppression_dir(mag, dir):
mag_boundary = mag.shape
# neighbour offsets for each of the 8 quantized gradient directions (45-degree bins)
dir_forward = np.array([[0, 1], [-1, -1], [-1, 0], [-1, 1], [0, -1], [1, 1], [1, 0], [1, -1]])
dir_backward = np.array([[0, -1], [1, 1], [1, 0], [1, -1], [0, 1], [-1, -1], [-1, 0], [-1, 1]])
dir=(dir*180/np.pi)%360
dir=dir/45
dir[dir>=7.5] = 0
dir=np.around(dir).astype('int')
indices = np.array(list(np.ndindex((dir.shape[0], dir.shape[1])))).reshape(dir.shape[0], dir.shape[1], 2)
forward = dir_forward[dir[:]]+indices
backward = dir_backward[dir[:]]+indices
output=mag.copy()
forward[forward[:, :, 0] < 0, 0]=0
forward[forward[:, :, 1] < 0, 1] = 0
backward[backward[:, :, 0] < 0, 0] = 0
backward[backward[:, :, 1] < 0, 1] = 0
# print(forward[:, :, 0] > mag_boundary[0]-1)
forward[forward[:, :, 0] > mag_boundary[0]-1,0] = mag_boundary[0]-1
forward[forward[:, :, 1] > mag_boundary[1]-1,1] = mag_boundary[1]-1
# print(mag_boundary, np.max(forward[:,:,0]),np.max(forward[:,:,1]))
backward[backward[:, :, 0] > mag_boundary[0]-1,0] = mag_boundary[0]-1
backward[backward[:, :, 1] > mag_boundary[1]-1,1] = mag_boundary[1]-1
output[mag[forward[:, :, 0], forward[:, :, 1]] > mag[:]] = 0
output[mag[backward[:, :, 0], backward[:, :, 1]] > mag[:]] = 0
return output
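# Worked example of the quantization above (illustrative): a gradient angle
# of 100 degrees gives 100/45 = 2.22 -> bin 2, so the pixel is compared with
# its up/down neighbours; angles of 337.5 degrees or more (bin >= 7.5) wrap
# back to bin 0, which is what the dir[dir>=7.5] = 0 line handles.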
if __name__ == "__main__":
#IMAGE_FILE_NAME_LIST = ["byeongkeon.png"]
for IMAGE_NAME in IMAGE_FILE_NAME_LIST:
IMAGE_FILE_PATH = "image/"+IMAGE_NAME
img = cv2.imread(IMAGE_FILE_PATH, cv2.IMREAD_GRAYSCALE)
start = time.time()
mag, dir = compute_image_gradient(img)
print(IMAGE_NAME,"compute_image_gradient Computation time : ", time.time() - start)
cv2.imshow("part_2_edge_raw_"+IMAGE_NAME, mag / 255)
cv2.imwrite("./result/part_2_edge_raw_"+IMAGE_NAME, mag)
start = time.time()
suppressed_mag = non_maximum_suppression_dir(mag, dir)
print(IMAGE_NAME,"non_maximum_suppression_dir Computation time : ", time.time() - start)
cv2.imshow("part_2_edge_sup_"+IMAGE_NAME, suppressed_mag / 255)
cv2.imwrite("./result/part_2_edge_sup_"+IMAGE_NAME, suppressed_mag)
cv2.waitKey()
cv2.destroyAllWindows()
| null |
CV_A1/A1_edge_detection.py
|
A1_edge_detection.py
|
py
| 3,455 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "A1_image_filtering.cross_correlation_2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "A1_image_filtering.get_gaussian_filter_2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "A1_image_filtering.cross_correlation_1d",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "A1_image_filtering.cross_correlation_1d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "A1_image_filtering.cross_correlation_1d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "A1_image_filtering.cross_correlation_1d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.around",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.ndindex",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 82,
"usage_type": "call"
}
] |
183710655
|
#edit by Zhang Kun@20180527
#Graduate School at Shenzhen, Tsinghua University
# -*- coding: UTF-8 -*-
import h5py
import numpy as np
import tensorflow as tf
import os
import random
from tensorflow.python import debug as tf_debug
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
INITIAL_LEARNING_RATE = 0.0001 # Initial learning rate.
BATCH_SIZE = 20
HDF5_PATH = '/data/nfs/zzw/data/sogou_chinese/train_data.hdf5'
TOTAL_CYCLE = 24
def _proc_script_item(item):
item_split = item.split('_')
query_txt = item_split[0]
seg_name = '_'.join(item_split[1:7])
query_start = int(item_split[7])
query_end = int(item_split[8])
return (query_txt, seg_name, query_start, query_end)
def _convert_hdf5_to_tfrecord():
#Split hdf5_items into train/eval/test sample files (train_hdf5_samples, eval_hdf5_samples, test_hdf5_samples) at a 7:1:2 ratio, with positive:negative samples at 1:1
hdf5_items = []
#Drop items that do not exist in the HDF5 file
# with h5py.File('/data/nfs/zzw/data/sogou_chinese/train_data.hdf5', 'r') as f:
# with open('/data/nfs/zzw/data/sogou_chinese/train_diff_pair_0214.txt', 'r', encoding='UTF-8') as f_script:
# for line in f_script.readlines():
# line = line.strip()
# words = line.split()
# for i in range(2):
# word = words[i]
# query_txt, seg_name, query_frame_start, query_frame_end = _proc_script_item(word)
# try:
# seg_frame = np.array(f[seg_name]['source'])
# train_items.append(word)
# except KeyError as err:
# print(str(err))
# Build complete (features, label) samples: (query_txt, query_name, query_start, query_end, seg_name, label)
with open('hdf5_items', 'r') as f_r:
for line in f_r.readlines():
line = line.strip()
hdf5_items.append(line)
print('Total {} items in hdf5_items'.format(len(hdf5_items)))
count = int(len(hdf5_items)/2)
positive_samples = []
negative_samples = []
for i in range(count):
query_txt_1, seg_name_1, query_start_1, query_end_1 = _proc_script_item(hdf5_items[i])
query_txt_2, seg_name_2, query_start_2, query_end_2 = _proc_script_item(hdf5_items[i+count])
#positive sample
query_txt = query_txt_1
query_name = seg_name_1
query_start = query_start_1
query_end = query_end_1
seg_name = seg_name_1
label = 1
positive_samples.append('-'.join([query_txt, query_name, str(query_start), str(query_end), seg_name, str(label)]))
query_txt = query_txt_2
query_name = seg_name_2
query_start = query_start_2
query_end = query_end_2
seg_name = seg_name_2
label = 1
positive_samples.append('-'.join([query_txt, query_name, str(query_start), str(query_end), seg_name, str(label)]))
#negative sample
query_txt = query_txt_1
query_name = seg_name_1
query_start = query_start_1
query_end = query_end_1
seg_name = seg_name_2
label = 0
negative_samples.append('-'.join([query_txt, query_name, str(query_start), str(query_end), seg_name, str(label)]))
query_txt = query_txt_2
query_name = seg_name_2
query_start = query_start_2
query_end = query_end_2
seg_name = seg_name_1
label = 0
negative_samples.append('-'.join([query_txt, query_name, str(query_start), str(query_end), seg_name, str(label)]))
random.shuffle(positive_samples)
random.shuffle(negative_samples)
train_samples = positive_samples[:int(0.7*len(positive_samples))]+negative_samples[:int(0.7*len(negative_samples))]
random.shuffle(train_samples)
eval_samples = positive_samples[int(0.7*len(positive_samples)):int(0.8*len(positive_samples))]+negative_samples[int(0.7*len(negative_samples)):int(0.8*len(negative_samples))]
random.shuffle(eval_samples)
test_samples = positive_samples[int(0.8*len(positive_samples)):]+negative_samples[int(0.8*len(negative_samples)):]
random.shuffle(test_samples)
print('train_samples {}\neval_samples {}\ntest_samples {}\n'.format(len(train_samples), len(eval_samples), len(test_samples)))
with open('train_hdf5_samples', 'w') as f_w:
for sample in train_samples:
f_w.write(sample+'\n')
print('train_hdf5_samples {}'.format(len(train_samples)))
with open('eval_hdf5_samples', 'w') as f_w:
for sample in eval_samples:
f_w.write(sample+'\n')
print('eval_hdf5_samples {}'.format(len(eval_samples)))
with open('test_hdf5_samples', 'w') as f_w:
for sample in test_samples:
f_w.write(sample+'\n')
print('test_hdf5_samples {}'.format(len(test_samples)))
return train_samples, eval_samples, test_samples
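# Note: the 7:1:2 split is applied to positives and negatives separately,
# so each resulting split preserves the 1:1 class balance.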
def data_generator(samples_txt_path, hdf5_path):
with h5py.File(hdf5_path, 'r') as hdf5_file:
with open(samples_txt_path, 'r') as samples_txt_file:
for line in samples_txt_file.readlines():
line = line.strip()
query_txt, query_name, query_start, query_end, seg_name, label = line.split('-')
query_start = int(query_start)
query_end = int(query_end)
label = int(label)
try:
query_frame = np.array(hdf5_file[query_name]['source'])
seg_frame = np.array(hdf5_file[seg_name]['source'])
yield ({'query_txt':query_txt,'query_frame':query_frame[query_start:query_end+1, :],\
'query_length':query_end + 1 - query_start, 'seg_frame':seg_frame,\
'seg_length':seg_frame.shape[0]}, label)
except KeyError as err:
print(str(err))
print('{} is error line'.format(line))
def input_fn(samples_txt_path, epochs, isshuffle):
dataset = tf.data.Dataset.from_generator(lambda :data_generator(samples_txt_path, HDF5_PATH), \
({'query_txt': tf.string, 'query_frame': tf.float32, 'query_length':tf.int32, 'seg_frame': tf.float32, 'seg_length':tf.int32}, tf.int32), \
({'query_txt': tf.TensorShape([]), 'query_frame': tf.TensorShape([None, 40]), 'query_length':tf.TensorShape([]),
'seg_frame': tf.TensorShape([None, 40]), 'seg_length':tf.TensorShape([])}, tf.TensorShape([])))
if isshuffle:
dataset = dataset.shuffle(100).repeat(epochs).padded_batch(BATCH_SIZE,
padded_shapes=({'query_txt': tf.TensorShape([]), 'query_frame': tf.TensorShape([None, 40]), 'query_length':tf.TensorShape([]),
'seg_frame': tf.TensorShape([None, 40]), 'seg_length':tf.TensorShape([])}, tf.TensorShape([])),
padding_values=({'query_txt':"default", 'query_frame':1.0, 'query_length':1, 'seg_frame':1.0, 'seg_length':1}, 1)
).filter(lambda x,y: tf.equal(tf.size(y), BATCH_SIZE))
else:
dataset = dataset.repeat(epochs).padded_batch(BATCH_SIZE,
padded_shapes=({'query_txt': tf.TensorShape([]), 'query_frame': tf.TensorShape([None, 40]), 'query_length':tf.TensorShape([]),
'seg_frame': tf.TensorShape([None, 40]), 'seg_length':tf.TensorShape([])}, tf.TensorShape([])),
padding_values=({'query_txt':"default", 'query_frame':1.0, 'query_length':1, 'seg_frame':1.0, 'seg_length':1}, 1)
).filter(lambda x,y: tf.equal(tf.size(y), BATCH_SIZE))
return dataset
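# A minimal sanity check of the pipeline above (TF 1.x graph mode; the
# snippet below is a sketch, not part of the original training flow):
# it = input_fn('./eval_hdf5_samples', epochs=1, isshuffle=False).make_one_shot_iterator()
# features, labels = it.get_next()
# with tf.Session() as sess:
#     print(sess.run(labels))  # one padded batch of BATCH_SIZE labels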
def lstm_cell():
return tf.contrib.rnn.BasicLSTMCell(128)
def _single_hop_of_seg_encoder(VQ, features):
segment_encoder_lstm =\
tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(2)])
segment_state = segment_encoder_lstm.zero_state(BATCH_SIZE, dtype=tf.float32)
seg_outputs, _ = tf.nn.dynamic_rnn(cell=segment_encoder_lstm\
, inputs=features['seg_frame'], dtype=tf.float32\
, initial_state=segment_state\
, scope='seg_dynamic_rnn', sequence_length=features['seg_length'])
a = seg_outputs
d = VQ[:,None,:]
dot_prod = tf.keras.backend.batch_dot(a,d,axes=[2,2])
norm_l2 = tf.norm(a,axis=2)*tf.norm(d, axis=2)
attentions = tf.exp(dot_prod/(norm_l2[:,:,None]+0.00001), name='attentions') #[batch_size, timesteps, 1]
masked_attentions = tf.sequence_mask(features['seg_length'], dtype=tf.float32)[:,:,None]*attentions
norm_factor_atten = tf.reduce_sum(masked_attentions, axis=1, name='attention_norm_factor')
attentions_norm = masked_attentions / norm_factor_atten[:,None,:]
VS = tf.reduce_sum(seg_outputs*attentions_norm, axis=1, name='VS')
# print(VS.shape)
return VS
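# Recap of the hop above: for every valid timestep t, a_t = exp(cos(h_t, VQ)),
# masked beyond seg_length and normalized to sum to 1; VS = sum_t a_t * h_t.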
#Custom model function
def model_fn(features, labels, mode):
with tf.name_scope('query_encoder'):
query_frame = features['query_frame']#[batch_size, time_steps, fbank_dim_len]
query_encoder_lstm = \
tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(2)])
query_state = query_encoder_lstm.zero_state(BATCH_SIZE, dtype=tf.float32)
query_outputs, _ = tf.nn.dynamic_rnn(cell=query_encoder_lstm\
, inputs=query_frame, dtype=tf.float32\
, initial_state=query_state\
, scope='query_dynamic_rnn', sequence_length=features['query_length'])
#masking padding zero value
a = tf.range(BATCH_SIZE)
b = features['query_length']-1
indices = tf.stack([a, b], axis=1)
VQ = tf.gather_nd(params=query_outputs, indices=indices)
with tf.variable_scope('seg_audio_encoder') as seg_encoder_scope:
VS = tf.zeros(shape=VQ.shape, dtype=tf.float32)
VQ_r = VQ
for i in range(2):
VQ_r = VQ_r + VS
VS = _single_hop_of_seg_encoder(VQ_r, features)
seg_encoder_scope.reuse_variables()
with tf.name_scope('detector'):
input_layer = tf.concat([VQ, VS], 1)
dense_1 = tf.layers.dense(inputs=input_layer, units=128, activation=tf.nn.relu)
dense_2 = tf.layers.dense(inputs=dense_1, units=64, activation=tf.nn.relu)
dense_3 = tf.layers.dense(inputs=dense_2, units=32, activation=tf.nn.relu)
logits = tf.layers.dense(inputs=dense_3, units=2, name='logits')
#compute predictions
predictions = {
"classes": tf.argmax(input=logits, axis=1, name='classes'),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
#PREDICT MODE
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#compute loss
onehot_labels = tf.one_hot(labels, depth=2)
loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
#compute metrics
accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
precision = tf.metrics.precision(labels=labels, predictions=predictions['classes'])
recall = tf.metrics.recall(labels=labels, predictions=predictions['classes'])
tf.summary.scalar('accuracy', accuracy[1])
tf.summary.scalar('precision', precision[1])
tf.summary.scalar('recall', recall[1])
#EVAL_MODE
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {'accuracy': accuracy, 'precision':precision, 'recall':recall}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
#TRAIN_MODE
if mode == tf.estimator.ModeKeys.TRAIN:
# optimizer = tf.train.AdamOptimizer(learning_rate=INITIAL_LEARNING_RATE)
# train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
optimizer = tf.train.AdamOptimizer(learning_rate=INITIAL_LEARNING_RATE)
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def main(_):
# Set up logging for predictions
tensors_to_log = {'probabilities': 'softmax_tensor', 'classes':'classes', 'seg_txt':'IteratorGetNext:1'}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=1)
#Instantiate the custom Estimator
# runconfig = tf.estimator.RunConfig(save_summary_steps=1, log_step_count_steps=1)
classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir='./attention_based_std_model_dir')
#Train the Estimator
# classifier.train(input_fn=lambda :input_fn('./train_hdf5_samples', epochs=5), hooks=[tf_debug.LocalCLIDebugHook(), logging_hook], steps=10)
# classifier.train(input_fn=lambda: input_fn('./train_hdf5_samples', epochs=5))
#The training set holds 160,234 samples
for i in range(TOTAL_CYCLE):
classifier.train(input_fn=lambda :input_fn('./train_hdf5_samples', epochs=3, isshuffle=True), hooks=[logging_hook], steps=1000)
print('{}th/{} eval'.format(i+1, TOTAL_CYCLE))
classifier.evaluate(input_fn=lambda: input_fn('./eval_hdf5_samples', epochs=1, isshuffle=False), hooks=[logging_hook])
print('Eval complete')
if __name__ == '__main__':
tf.app.run(main)
| null |
reference/icassp2018_attention_based_std.py
|
icassp2018_attention_based_std.py
|
py
| 13,895 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.data.Dataset.from_generator",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.string",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.int32",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.TensorShape",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.TensorShape",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.TensorShape",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tensorflow.TensorShape",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tensorflow.size",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tensorflow.TensorShape",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tensorflow.TensorShape",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.size",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.rnn.BasicLSTMCell",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.contrib.rnn.MultiRNNCell",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.dynamic_rnn",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.backend.batch_dot",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.norm",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "tensorflow.exp",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "tensorflow.sequence_mask",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.rnn.MultiRNNCell",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.dynamic_rnn",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.range",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "tensorflow.stack",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "tensorflow.gather_nd",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.argmax",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.softmax",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.EstimatorSpec",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.one_hot",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "tensorflow.losses.softmax_cross_entropy",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "tensorflow.losses",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.metrics.accuracy",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "tensorflow.metrics",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.metrics.precision",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "tensorflow.metrics",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.metrics.recall",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "tensorflow.metrics",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.EstimatorSpec",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.clip_by_value",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.get_global_step",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.EstimatorSpec",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.LoggingTensorHook",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.Estimator",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.app.run",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "tensorflow.app",
"line_number": 259,
"usage_type": "attribute"
}
] |
524313640
|
# This script contains the functions used for plotting different plots in
# other scripts used for evaluation.
# Author: Shweta Narkhede and Camilo Salcedo
# Created on: Oct 24th, 2020
# Edited by: Benjamin Mitchell, Quinn Hull
# Edited on: Nov 15th, 2020
# %%
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import eval_functions as ef
import seaborn as sns
# %% Functions
def get_histogram(savepath, forecasts, obs_week, week):
"""Get Histograms:
-----------------------------------
This function plots histograms of predicted weekly flow data and
the count of the flow value prediction. It also plots observed weekly
data for the last week for comparison.
-----------------------------------
Parameters:
forecasts = array
every student's forecast for either week 1 or 2
obs_week = float
provides week's observed flow
week = Week number for the forecast (1 or 2)
-----------------------------------
Outputs:
figure of Histogram plot
"""
fig2 = plt.figure()
fig2.set_size_inches(8, 6)
plt.hist(forecasts, bins=120, color='blue', alpha=0.75,
label='Student Guesses')
histogram = plt.plot([obs_week]*3, np.arange(0, 3, 1), color='red',
linestyle='-', label='Actual mean')
title_string = 'Student guesses for Week '+str(week)
plt.title(title_string)
plt.xlabel('Flow Forecast (cfs)')
plt.ylabel('Count')
plt.legend(loc='upper left')
fig2.patch.set_facecolor('xkcd:white')
plt.tight_layout()
plt.show()
fig2.savefig(savepath)
return histogram
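# Example call (illustrative values; the save path is hypothetical):
# get_histogram('figs/hist_week1.png', np.random.normal(150, 30, 19), 142.0, 1)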
def get_simpleplot(savepath, forecasts, obs_week, week):
"""Get Simple plot:
------------------------------------
This function plots a simple line plot of student's weekly averaged
forecast for a week
------------------------------------
Parameters:
forecasts = array
provides weekly forecasted flow of each student
obs_week = float
week's observed flow
week = string
Week number for the forecast (1 or 2)
------------------------------------
Outputs: figure of simple line plot
"""
# Get the array of firstnames for the plot
firstnames = ef.getFirstNames()
fig3, ax = plt.subplots()
fig3.set_size_inches(10, 4)
clean_forecasts = [x for x in forecasts if not np.isnan(x)]
class_avg = np.mean(clean_forecasts)
simple_plot = ax.plot(forecasts, '-g', label='Forecast', alpha=.8)
plt.axhline(y=class_avg, linestyle='dashed',
label='Class Avg', alpha=.8, color='red')
plt.axhline(y=obs_week, linestyle='dotted', label='Observed',
alpha=.8, color='blue')
plt.xticks(ticks = np.arange(0, 19, 1), labels = firstnames, rotation = 60)
title_string = 'Week '+str(week)+' Forecasts'
ax.set(title=title_string, xlabel="Students",
ylabel="Weekly Avg Flow [cfs]")
ax.legend(fancybox=True, framealpha=1, shadow=True,
borderpad=1)
fig3.patch.set_facecolor('xkcd:white')
plt.tight_layout()
plt.show()
fig3.savefig(savepath)
return simple_plot
def plot_class_forecasts(df, week_flows, leadtime, type_plot):
""" plot_class_forecasts()
---------------------------------------------------------------------
This function plots the forecasts submitted by each student for both
Week 1 & 2 Forecasts. In addition, is capable of plotting the absolute
error in regards the observed value.
---------------------------------------------------------------------
Parameters:
df = Dataframe
Includes the weekly forecast values for Week 1 and 2 for each student.
week_flows = Dataframe
Observed flows per week obtained from USGS.
leadtime: int
leadtime for the forecast. It can only be 1 or 2
type_plot: string
Enter 'forecasts' to plot all submitted values, or 'abs_error'
to plot the deviation from the observed value.
---------------------------------------------------------------------
Outputs: Plot of the forecasted values or the absolute error depending the
user input
"""
# Request the parameters for the plots
y_low = (input('Please introduce the lower limit for y-Axis (Hit enter for \
default value 0):'))
y_max = (input('Please introduce the upper limit for y-Axis (Hit enter for \
default values):'))
plot_weeks_inp = input('Please introduce the list of weeks to consider as \
["Week #", "Week #", ...]. Otherwise, if you want to include all weeks\
hit enter:')
if plot_weeks_inp == '':
column_weeks = [i for i in df.columns]
else:
column_weeks = [i for i in df.columns if i in plot_weeks_inp]
# Markers for the plot
markers = ['o', 'v', '^', 'D', '>', 's', 'P', 'X', '<', '>',
'X', 'o', 'v', 's', '^', 'P', '<', 'D', 's']
# Get the array of firstnames for the plot
firstnames = ef.getFirstNames()
# Trim and set index the same weekly flow (start 8/23)
weekly_flows = week_flows.iloc[1:len(column_weeks) + 1, 3:4]
weekly_flows.set_index(df.columns, append=False, inplace=True)
# Assign values depending the plot type selected
if type_plot == 'abs_error':
df = df.T.subtract(weekly_flows['observed'], axis=0).T
plot_ylabel = "Deviation from Weekly Avg Flow [cfs]"
plot_title = 'Absolute Error in '+str(leadtime) + ' Week Forecast for \n\
HAS-Tools Class'
elif type_plot == 'forecast':
plot_ylabel = "Weekly Avg Flow [cfs]"
plot_title = str(leadtime)+' Week Forecast for HAS-Tools Class \n '
# Plotting process
fig, ax = plt.subplots()
ax.plot(df.T)
for i, line in enumerate(ax.get_lines()):
line.set_marker(markers[i])
# Plot observed flow if the selected plot is the forecast
if type_plot == 'forecast':
ax.plot(column_weeks, weekly_flows['observed'], color='black',
marker='o', linestyle='--', linewidth=3)
plot_labels = firstnames + ['Observed Flow']
elif type_plot == 'abs_error':
plot_labels = firstnames
# Format for labels and plot title
ax.set_xlabel('Weeks \n', fontsize=13, fontweight='bold')
ax.set_ylabel(plot_ylabel, fontsize=13, fontweight='bold')
ax.set_title(plot_title, fontsize=15, fontweight='bold')
# Assigns the limits for y-axis based on user's input
if y_low == '' and y_max != '':
ax.set_ylim(df[column_weeks].min().min(), float(y_max))
elif y_max == '' and y_low != '':
ax.set_ylim(float(y_low), df[column_weeks].max().max())
elif y_max == '' and y_low == '':
ax.set_ylim(df[column_weeks].min().min(), df[column_weeks].max().max())
else:
ax.set_ylim(float(y_low), float(y_max))
ax.legend(plot_labels, loc='lower center',
bbox_to_anchor=(.5, -0.4), ncol=6)
fig.set_size_inches(9, 5)
fig.patch.set_facecolor('xkcd:white')
plt.show()
def plot_class_summary(df, week_flows, week, type_plot):
""" plot_class_summary()
---------------------------------------------------------------------
This function plots the summary for the forecasts submitted by the students
for Week 1 & 2 Forecasts. It includes values such as the mean, median, min
and max values, among others. It can be plotted as a box-whiskers plot or a
regular plot.
---------------------------------------------------------------------
Parameters:
df = Dataframe
Includes the weekly forecast values for Week 1 and 2 for each student.
week_flows = Dataframe
Observed flows per week obtained from USGS.
week: int
The week for the forecast. It can only be 1 or 2
type_plot: string
Enter 'box' to plot the summary using a Box-Whiskers plot or
'plot' to plot it as a regular plot.
---------------------------------------------------------------------
Outputs: Plot showing the main properties of the forecast entries for HAS
Tools class as a either a Box-Whiskers plot or a regular plot
depending the user input
"""
# Request the plotting parameters
y_low = (input('Please introduce the lower limit for y-Axis (Hit enter for \
default value 0):'))
y_max = (input('Please introduce the upper limit for y-Axis (Hit enter for \
default values):'))
plot_weeks_inp = input('Please introduce the list of weeks to consider as \
["Week #", "Week #", ...]. Otherwise, if you want to include all weeks\
hit enter:')
if plot_weeks_inp == '':
column_weeks = [i for i in df.columns]
else:
column_weeks = [i for i in df.columns if i in plot_weeks_inp]
# Plotting process depending on the type of plot selected
if type_plot == 'box':
fig, ax = plt.subplots()
# Setup of the features of the boxplot
boxprops = dict(linestyle='-', linewidth=0.8, color='#00145A',
facecolor='white')
capprops = dict(color='#00145A')
whiskerprops = dict(color='#00145A', linestyle='--')
medianprops = dict(linewidth=1.2, color='#E80B5F')
# Plot boxplot and stripplot and set labels and title
total_data = pd.melt(df[column_weeks])
ax = sns.boxplot(x='variable', y='value', data=total_data,
linewidth=0.8, width=0.4, showfliers=False,
whiskerprops=whiskerprops, color='w', boxprops=boxprops,
medianprops=medianprops, capprops=capprops)
ax = sns.stripplot(x='variable', y='value', data=total_data,
jitter=True, alpha=0.5)
ax.set_ylabel('Flow (cfs)', fontsize=13, fontweight='bold')
ax.set_xlabel('\n Weeks', fontsize=13, fontweight='bold')
ax.set_title('Weekly Discharge Prediction for Week '+str(week)+'\n',
fontsize=15, fontweight='bold')
# Assigns the limits for y-axis based on user's input
if y_low == '' and y_max != '':
ax.set_ylim(0, float(y_max))
elif y_max == '' and y_low != '':
ax.set_ylim(float(y_low), df[column_weeks].max().max())
elif y_max == '' and y_low == '':
ax.set_ylim(0, df[column_weeks].max().max())
else:
ax.set_ylim(float(y_low), float(y_max))
# Plot mean and observed values
ax.plot(np.mean(df[column_weeks]), linestyle='dashed', linewidth=1.5,
marker='o', markersize=4, color='#0E6FDC',
label='Class Average')
ax.plot(column_weeks, week_flows['observed'][1:len(column_weeks)+1],
color='black', marker='o', linestyle='--', markersize=4,
label='Observed')
# Legend
ax.legend(loc='lower center',
bbox_to_anchor=(.5, -0.4), ncol=5)
elif type_plot == 'plot':
plt.style.use('seaborn-whitegrid')
# Plot boxplot and stripplot and set labels and title
ay = plt.plot(column_weeks, df[column_weeks].mean(), marker='o',
label='Class Average')
ay = plt.plot(column_weeks, df[column_weeks].quantile(0.25), marker='o',
label='Lower Quantile')
ay = plt.plot(column_weeks, df[column_weeks].quantile(0.75), marker='o',
label='Upper Quantile')
ay = plt.plot(column_weeks, df[column_weeks].min(), marker='o',
label='Min')
ay = plt.plot(column_weeks, df[column_weeks].max(), marker='o',
label='Max')
ay = plt.plot(column_weeks, week_flows['observed'][1:len(column_weeks)+1], color='black', marker='o', linestyle='--',
label='Observed')
plt.ylabel('Flow (cfs)', fontsize=13, fontweight='bold')
plt.xlabel('\n Weeks', fontsize=13, fontweight='bold')
plt.title('Weekly Discharge Prediction for Week # '+str(week)+'\n',
fontsize=15, fontweight='bold')
# Assigns the limits for y-axis based on user's input
if y_low == '' and y_max != '':
plt.ylim(0, float(y_max))
elif y_max == '' and y_low != '':
plt.ylim(float(y_low), df[column_weeks].max().max())
elif y_max == '' and y_low == '':
plt.ylim(0, df[column_weeks].max().max())
else:
plt.ylim(float(y_low), float(y_max))
# Legend
plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.4), ncol=3)
plt.gcf().patch.set_facecolor('xkcd:white')  # fig is only defined in the 'box' branch
#%%
# Week 10 Additions
def plot_seasonal_rmse(savepath, seasonal_rmse):
"""Seasonal Root Mean Square Error:
-----------------------------------
This function plots the root mean square error of seasonal flow
data for week 1 to the most recent entry.
You have the option of entering minimum and maximum y values.
-----------------------------------
Parameters:
seasonal_rmse = pandas dataframe
every student's seasonal root mean square error
-----------------------------------
Output:
Figure of long term weekly prediction root mean square errors
"""
# Request the parameters for the plots
y_low = (input('Please introduce the lower limit for y-Axis (Hit enter for \
default value 0):'))
y_max = (input('Please introduce the upper limit for y-Axis (Hit enter for \
default values):'))
column_weeks = [i for i in seasonal_rmse.columns]
# Markers for the plot
markers = ['o', 'v', '^', 'D', '>', 's', 'P', 'X', '<', '>',
'X', 'o', 'v', 's', '^', 'P', '<', 'D', 's']
# Get the array of firstnames for the plot
firstnames = ef.getFirstNames()
# plotting
fig10, ax = plt.subplots()
ax.plot(seasonal_rmse)
for i, line in enumerate(ax.get_lines()):
line.set_marker(markers[i])
ax.set_xlabel('Weeks', fontsize=13, fontweight='bold')
ax.set_ylabel("Root Mean Square Error", fontsize=13, fontweight='bold')
ax.set_title("Seasonal Root Mean Square Error", fontsize=13,
fontweight='bold')
# Assigns the limits for y-axis based on user's input
if y_low == '' and y_max != '':
ax.set_ylim(seasonal_rmse[column_weeks].min().min(), float(y_max))
elif y_max == '' and y_low != '':
ax.set_ylim(float(y_low), seasonal_rmse[column_weeks].max().max())
elif y_max == '' and y_low == '':
ax.set_ylim(seasonal_rmse[column_weeks].min().min(),
seasonal_rmse[column_weeks].max().max())
else:
ax.set_ylim(float(y_low), float(y_max))
# showing the legend
ax.legend(firstnames, loc='lower center',
bbox_to_anchor=(.5, -0.4), ncol=6)
fig10.patch.set_facecolor('xkcd:white')
plt.show()
fig10.savefig(savepath)
def rmse_histogram(savepath, weekly_rmse):
"""Root Mean Square Error Histogram:
-----------------------------------
This function plots a histogram of the root mean square error
of weekly flow data
-----------------------------------
Parameters:
weekly_rmse = pandas dataframe
every student's weekly root mean square error
-----------------------------------
Outputs:
Histogram plot of week 1 and 2 root mean square errors
"""
fig11 = plt.figure()
plt.hist(weekly_rmse.iloc[:, 0], bins=20, rwidth=0.8, color='green',
alpha=0.3, label='Week 1')
plt.hist(weekly_rmse.iloc[:, 1], bins=20, rwidth=0.8, color='red',
alpha=0.3, label='Week 2')
plt.xlabel('Root Mean Square Error', fontweight='bold')
plt.ylabel('Frequency', fontweight='bold')
plt.title('Weekly Root Mean Square Errors', fontweight='bold')
plt.legend()
fig11.patch.set_facecolor('xkcd:white')
plt.tight_layout()
plt.show()
fig11.savefig(savepath)
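# Example call (illustrative; assumes weekly_rmse has one column per lead time):
# rmse_histogram('figs/rmse_hist.png', weekly_rmse)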
# %%
# Week 12 Additions
def noIinTEAM(savepath, class_list, obs_week, oneweek_forecasts, twoweek_forecasts, bar_width):
# Getting team names for data collection
team1 = ['Adam', 'Lourdes', 'Patrick', 'Ben']
team2 = ['Alcely', 'Shweta', 'Richard', 'Scott']
team3 = ['Camilo', 'Diana', 'Xenia', 'Danielle']
team4 = ['Alexa', 'Quinn', 'Abigail']
team5 = ['Jill', 'Mekha', 'Jake']
team_names = ['Big_Brain_Squad', 'Team_SARS', 'Aquaholics',
'Dell_for_the_Win?', 'Team_MJJ']
team_tol = [*team1, *team2, *team3, *team4, *team5]
class_pre_dict = pd.DataFrame({'oneweek_forecasts':oneweek_forecasts,
'twoweek_forecasts':twoweek_forecasts},
index = class_list,
columns = ['oneweek_forecasts', 'twoweek_forecasts'])
# Organizing by team name
Big_Brain_Squad = class_pre_dict.loc[team1]
Team_SARS = class_pre_dict.loc[team2]
Aquaholics = class_pre_dict.loc[team3]
Dell_for_the_Win = class_pre_dict.loc[team4]
Team_MJJ = class_pre_dict.loc[team5]
# Plotting time!
x = np.arange(0, 18, 1)
fig12 = plt.figure()
fig12.set_size_inches(25, 8)
ax = fig12.add_subplot()
w = bar_width
plt.xticks(x + w/2, team_tol, rotation = 60, fontsize=15)
plt.yticks(fontsize=15)
ax.bar(x[0:4], Big_Brain_Squad.oneweek_forecasts, width=w, align='center', label = 'team1')
ax.bar(x[0:4]+w, Big_Brain_Squad.twoweek_forecasts, width=w, align='center', label = 'single1')
ax.bar(x[4:8], Team_SARS.oneweek_forecasts, width=w, align='center', label = 'team2')
ax.bar(x[4:8]+w, Team_SARS.twoweek_forecasts, width=w, align='center', label = 'single2')
ax.bar(x[8:12], Aquaholics.oneweek_forecasts, width=w, align='center', label = 'team3')
ax.bar(x[8:12]+w, Aquaholics.twoweek_forecasts, width=w, align='center', label = 'single3')
ax.bar(x[12:15], Dell_for_the_Win.oneweek_forecasts, width=w, align='center', label = 'team4')
ax.bar(x[12:15]+w, Dell_for_the_Win.twoweek_forecasts, width=w, align='center', label = 'single4')
ax.bar(x[15:18], Team_MJJ.oneweek_forecasts, width=w, align='center', label = 'team5')
ax.bar(x[15:18]+w, Team_MJJ.twoweek_forecasts, width=w, align='center', label = 'single5')
ax.axhline(y=obs_week, linewidth=2, linestyle = '--', color='k')
plt.xlabel('Student', fontsize=15)
plt.ylabel('Average Flow', fontsize=15)
ax.legend( loc='lower center', fontsize=20,
bbox_to_anchor=(.5, -0.4), ncol=5)
plt.text(0.7, obs_week, 'Observed Flow', fontsize=21)
fig12.patch.set_facecolor('xkcd:white')
plt.tight_layout()
plt.show()
fig12.savefig(savepath)
def last_2_weeks(savepath, obs_week, oneweek_forecasts, twoweek_forecasts, bar_width):
# Get the array of firstnames for the plot
firstnames = ef.getFirstNames()
class_forecasts = pd.DataFrame({'oneweek_forecasts':oneweek_forecasts,
'twoweek_forecasts':twoweek_forecasts},
index = firstnames,
columns = ['oneweek_forecasts', 'twoweek_forecasts'])
stu = np.arange(0, 19, 1)
fig13 = plt.figure()
fig13.set_size_inches(25, 8)
ax = fig13.add_subplot()
w = bar_width
plt.xticks(stu + w/2, firstnames, rotation = 60, fontsize=15)
ax.bar(stu, class_forecasts.oneweek_forecasts, width=w, align='center', label = 'week1')
ax.bar(stu+w, class_forecasts.twoweek_forecasts, width=w, align='center', label = 'week2')
ax.axhline(y=obs_week, linewidth=2, linestyle = '--', color='k')
plt.xlabel('Student', fontsize=15)
plt.ylabel('Average Flow', fontsize=15)
ax.legend( loc='lower center', fontsize=20,
bbox_to_anchor=(.5, -0.4), ncol=5)
plt.text(0.7, obs_week, 'Observed Flow', fontsize=21)
fig13.patch.set_facecolor('xkcd:white')
plt.tight_layout()
plt.show()
fig13.savefig(savepath)
def last_2_weeks_diff(savepath, obs_week, oneweek_forecasts, twoweek_forecasts, bar_width):
# Get the array of firstnames for the plot
firstnames = ef.getFirstNames()
class_forecasts = pd.DataFrame({'oneweek_forecasts':oneweek_forecasts,
'twoweek_forecasts':twoweek_forecasts},
index = firstnames,
columns = ['oneweek_forecasts', 'twoweek_forecasts'])
class_forecasts.insert(2, 'Diff_1', np.array(class_forecasts['oneweek_forecasts'] - obs_week), True)
class_forecasts.insert(3, 'Diff_2', np.array(class_forecasts['twoweek_forecasts'] - obs_week), True)
# Plotting Diff
stu = np.arange(0, 19, 1)
fig14 = plt.figure()
fig14.set_size_inches(25, 8)
ax = fig14.add_subplot()
w = bar_width
plt.xticks(stu + w/2, firstnames, rotation = 60, fontsize=15)
ax.bar(stu, class_forecasts.Diff_1, width=w, align='center', label = 'week1')
ax.bar(stu+w, class_forecasts.Diff_2, width=w, align='center', label = 'week2')
ax.axhline(y=0, linewidth=2, linestyle = '--', color='k')
plt.xlabel('Student', fontsize=15)
plt.ylabel('Average Flow', fontsize=15)
ax.legend( loc='lower center', fontsize=20,
bbox_to_anchor=(.5, -0.4), ncol=5)
# plt.text(0, 0, 'Observed Flow', fontsize=21)
fig14.patch.set_facecolor('xkcd:white')
plt.tight_layout()
plt.show()
fig14.savefig(savepath)
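# Example call (illustrative values; each forecast array needs one entry per
# student and a bar_width around 0.4 works well):
# last_2_weeks_diff('figs/diff.png', 142.0, oneweek_forecasts, twoweek_forecasts, 0.4)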
# %%
| null |
evaluation_scripts/plot_functions.py
|
plot_functions.py
|
py
| 21,655 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "eval_functions.getFirstNames",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.isnan",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "eval_functions.getFirstNames",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "pandas.melt",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "seaborn.stripplot",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 281,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "eval_functions.getFirstNames",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 395,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 396,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 432,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 449,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "eval_functions.getFirstNames",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 471,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 479,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 486,
"usage_type": "name"
},
{
"api_name": "eval_functions.getFirstNames",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 509,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 519,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 520,
"usage_type": "name"
}
] |
274023298
|
from tkinter import *
from functools import partial
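# Each Button is wired to click() through functools.partial, so the handler
# receives the widget itself and can read its own label.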
def click(botao):
print(botao['text'])
janela = Tk()
bt1 = Button(janela, width=20, text='Button 1')
bt1['command'] = partial(click, bt1)
bt1.place(x = 100, y = 100)
bt2 = Button(janela, width=20, text='Button 2')
bt2['command'] = partial(click, bt2)
bt2.place(x = 100, y = 130)
janela.geometry('300x300+200+200')
janela.mainloop()
| null |
aula7.py
|
aula7.py
|
py
| 411 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "functools.partial",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 13,
"usage_type": "call"
}
] |
100619939
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def bbox_iou(boxes1, boxes2, x1y1x2y2=True):
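    # boxes1/boxes2 are indexed as (x1, y1, x2, y2) when x1y1x2y2=True, or as
    # (center_x, center_y, w, h) otherwise; each argument may hold one box or a
    # 4xN batch, and the IoU is computed element-wise.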
if x1y1x2y2:
mx = torch.min(boxes1[0], boxes2[0])
Mx = torch.max(boxes1[2], boxes2[2])
my = torch.min(boxes1[1], boxes2[1])
My = torch.max(boxes1[3], boxes2[3])
w1 = boxes1[2] - boxes1[0]
h1 = boxes1[3] - boxes1[1]
w2 = boxes2[2] - boxes2[0]
h2 = boxes2[3] - boxes2[1]
else:
mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0)
Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0)
my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0)
My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0)
w1 = boxes1[2]
h1 = boxes1[3]
w2 = boxes2[2]
h2 = boxes2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
    mask = (cw <= 0) | (ch <= 0)  # cells where the boxes do not overlap
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
carea[mask] = 0
uarea = area1 + area2 - carea
return carea / uarea
def build_label(pred_boxes, target, anchors, num_anchors, nH, nW, noobject_scale, object_scale, sil_thresh, seen=15000):
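    # Shapes, inferred from the code below: pred_boxes is (nB*nA*nH*nW, 4) in
    # grid units; target is (nB, 50*6) with per-box rows of
    # (class, x, y, w, h, lr_conf), where x/y/w/h are normalized to [0, 1].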
nB = target.size(0) #batch=4
nA = num_anchors #5
anchor_step = int(len(anchors) / num_anchors) #2
conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale
coord_mask = torch.zeros(nB, nA, nH, nW)
cls_mask = torch.zeros(nB, nA, nH, nW)
tx = torch.zeros(nB, nA, nH, nW)
ty = torch.zeros(nB, nA, nH, nW)
tw = torch.zeros(nB, nA, nH, nW)
th = torch.zeros(nB, nA, nH, nW)
tconf = torch.zeros(nB, nA, nH, nW)
tcls = torch.zeros(nB, nA, nH, nW)
lr_mask = torch.zeros(nB, nA, nH, nW)
tconf_lr = torch.zeros(nB, nA, nH, nW)
nAnchors = nA * nH * nW
nPixels = nH * nW
for b in range(nB):
cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t()
cur_ious = torch.zeros(nAnchors)
for t in range(50):
if target[b][t * 6 + 1] == 0:
break
gx = target[b][t * 6 + 1] * nW
gy = target[b][t * 6 + 2] * nH
gw = target[b][t * 6 + 3] * nW
gh = target[b][t * 6 + 4] * nH
cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t()
cur_ious = torch.max(cur_ious, bbox_iou(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
cur_ious = cur_ious.view(nA, nH, nW)
conf_mask[b][cur_ious > sil_thresh] = 0
if seen < 12800:
tx.fill_(0.5)
ty.fill_(0.5)
tw.zero_()
th.zero_()
coord_mask.fill_(1)
nGT = 0
nCorrect = 0
for b in range(nB):
for t in range(50):
if target[b][t * 6 + 1] == 0:
break
nGT = nGT + 1
best_iou = 0.0
best_n = -1
min_dist = 10000
gx = target[b][t * 6 + 1] * nW
gy = target[b][t * 6 + 2] * nH
gi = int(gx)
gj = int(gy)
gw = target[b][t * 6 + 3] * nW
gh = target[b][t * 6 + 4] * nH
gt_box = torch.FloatTensor([0, 0, gw, gh])
for n in range(nA):
aw = anchors[anchor_step * n]
ah = anchors[anchor_step * n + 1]
anchor_box = torch.FloatTensor([0, 0, aw, ah])
iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False)
if iou > best_iou:
best_iou = iou
best_n = n
gt_box = torch.FloatTensor([gx, gy, gw, gh])
pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW + gi]
coord_mask[b][best_n][gj][gi] = 1
cls_mask[b][best_n][gj][gi] = 1
conf_mask[b][best_n][gj][gi] = object_scale
tx[b][best_n][gj][gi] = target[b][t * 6 + 1] * nW - gi
ty[b][best_n][gj][gi] = target[b][t * 6 + 2] * nH - gj
tw[b][best_n][gj][gi] = math.log(gw / anchors[anchor_step * best_n])
th[b][best_n][gj][gi] = math.log(gh / anchors[anchor_step * best_n + 1])
iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False) # best_iou
tconf[b][best_n][gj][gi] = iou
lr_mask[b][best_n][gj][gi] = 1
tconf_lr[b][best_n][gj][gi] = target[b][t * 6 + 5]
tcls[b][best_n][gj][gi] = target[b][t * 6]
if iou > 0.5:
nCorrect = nCorrect + 1
return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls, lr_mask, tconf_lr
class loss_v2(nn.Module):
def __init__(self, num_classes=2, anchors=[], num_anchors=5):
super(loss_v2, self).__init__()
self.num_classes = num_classes # 20,80
self.anchors = anchors #40,15, 90,45, 120,55, 190,65, 220,88
self.num_anchors = num_anchors #5
self.anchor_step = int(len(anchors) / num_anchors) #2
self.coord_scale = 1
self.noobject_scale = 1
self.object_scale = 5
self.class_scale = 1
self.thresh = 0.6
def forward(self, output, target):
        # output : B x A*(4+1+num_classes+1) x H x W -- the extra channel per anchor is the left/right confidence
nB = output.data.size(0) #batch=4
nA = self.num_anchors #5
nC = self.num_classes #2
nH = output.data.size(2) #13
nW = output.data.size(3) #13
output = output.view(nB, nA, (5 + nC + 1), nH, nW)
x = torch.sigmoid(output.index_select(2, torch.tensor([0]).cuda()).view(nB, nA, nH, nW))
y = torch.sigmoid(output.index_select(2, torch.tensor([1]).cuda()).view(nB, nA, nH, nW))
w = output.index_select(2, torch.tensor([2]).cuda()).view(nB, nA, nH, nW)
h = output.index_select(2, torch.tensor([3]).cuda()).view(nB, nA, nH, nW)
conf = torch.sigmoid(output.index_select(2, torch.tensor([4]).cuda()).view(nB, nA, nH, nW))
cls = output.index_select(2, torch.linspace(5, 5 + nC - 1, nC).long().cuda())
cls = cls.view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(nB * nA * nH * nW, nC)
conf_lr = torch.sigmoid(output.index_select(2, torch.tensor([5 + nC]).cuda()).view(nB, nA, nH, nW))
pred_boxes = torch.cuda.FloatTensor(4, nB * nA * nH * nW)
# pred_boxes = torch.zeros(size = (4, nB * nA * nH * nW)).cuda()
grid_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
grid_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
anchor_w = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([0])).cuda()
anchor_h = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([1])).cuda()
anchor_w = anchor_w.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW)
anchor_h = anchor_h.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW)
xv = x.view(nB * nA * nH * nW)
yv = y.view(nB * nA * nH * nW)
wv = w.view(nB * nA * nH * nW)
hv = h.view(nB * nA * nH * nW)
pred_boxes[0] = xv.data + grid_x
pred_boxes[1] = yv.data + grid_y
pred_boxes[2] = torch.exp(wv.data) * anchor_w
pred_boxes[3] = torch.exp(hv.data) * anchor_h
pred_boxes = convert2cpu(pred_boxes.transpose(0, 1).contiguous().view(-1, 4))
nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls , lr_mask, tconf_lr\
= build_label(pred_boxes, target.data, self.anchors, nA, nH, nW, self.noobject_scale, self.object_scale, self.thresh)
cls_mask = (cls_mask == 1)
nProposals = int((conf > 0.25).sum())
tx = tx.cuda()
ty = ty.cuda()
tw = tw.cuda()
th = th.cuda()
tconf = tconf.cuda()
tcls = tcls[cls_mask].view(-1).long().cuda()
tconf_lr = tconf_lr.cuda()
coord_mask = coord_mask.cuda()
conf_mask = conf_mask.cuda().sqrt()
cls_mask = cls_mask.view(-1, 1).repeat(1, nC).cuda()
cls = cls[cls_mask].view(-1, nC)
loss_x = self.coord_scale * nn.MSELoss(reduction='sum')(x * coord_mask, tx * coord_mask) / 2.0
loss_y = self.coord_scale * nn.MSELoss(reduction='sum')(y * coord_mask, ty * coord_mask) / 2.0
loss_w = self.coord_scale * nn.MSELoss(reduction='sum')(w * coord_mask, tw * coord_mask) / 2.0
loss_h = self.coord_scale * nn.MSELoss(reduction='sum')(h * coord_mask, th * coord_mask) / 2.0
loss_conf = nn.MSELoss(reduction='sum')(conf * conf_mask, tconf * conf_mask) / 2.0
loss_cls = self.class_scale * nn.CrossEntropyLoss(reduction='sum')(cls, tcls)
lr_mask = lr_mask.cuda()
loss_conf_lr = nn.MSELoss(reduction='sum')(conf_lr * lr_mask, tconf_lr * lr_mask) / 2.0
loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls + 0.5 * loss_conf_lr
# print('nGT %d, recall %d, proposals %d, loss: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f' % (
# nGT, nCorrect, nProposals, loss_x.item(), loss_y.item(), loss_w.item(), loss_h.item(),
# loss_conf.item(), loss_cls.item(), loss.item()))
return loss / nB
| null |
ears_right/region_loss.py
|
region_loss.py
|
py
| 9,347 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.FloatTensor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.sigmoid",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.linspace",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.cuda.FloatTensor",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "torch.linspace",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.linspace",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 205,
"usage_type": "name"
}
] |
323408892
|
#coding: utf-8
from flask import Flask, request, g, render_template, redirect, url_for
from datetime import datetime
import sqlite3, os
app = Flask(__name__)
DATABASE = 'message.db'
@app.route('/')
def index():
cursor = get_db().cursor()
cursor.execute('select * from message order by post_time desc, author')
data = cursor.fetchall()
cursor.close()
return render_template('index.html', data=data)
@app.route('/comment/', methods=['POST'])
def comment():
author = request.form.get('author', None)
message = request.form.get('message', None)
if author is None or message is None:
        return u'Invalid input data!'
post_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
cursor = get_db().cursor()
cursor.execute('insert into message(author, message, post_time) values (?, ?, ?)', (author, message, post_time))
cursor.close()
get_db().commit()
return redirect(url_for('index'))
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
db.text_factory = str
return db
@app.teardown_appcontext
def close_database_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def init_db():
if os.path.isfile('message.db'): return
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
cursor.execute('create table message (id INTEGER PRIMARY KEY, author varchar(20) NOT NULL, message varchar(500) NOT NULL, post_time datetime DEFAULT CURRENT_TIMESTAMP )')
cursor.close()
conn.commit()
if __name__=='__main__':
init_db()
app.run(debug=True)
| null |
0023/0023.py
|
0023.py
|
py
| 1,683 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "flask.g._database",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "os.path.isfile",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 47,
"usage_type": "call"
}
] |
234501225
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from .models import Prisoner
from django.template import loader
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
@login_required(login_url='/login/', redirect_field_name=None)
def index(request):
prisoner_list = Prisoner.objects.order_by("code")
template = loader.get_template("prisoner_list/prisoners.html")
context = {
"prisoner_list": prisoner_list
}
return HttpResponse(template.render(context, request))
@login_required(login_url='/login/', redirect_field_name=None)
def delete(request, prisoner_id):
Prisoner.objects.get(pk=prisoner_id).delete()
return redirect('/')
@login_required(login_url='/login/', redirect_field_name=None)
def add_prisoner(request):
data = request.POST
code = data.get('code', '')
if not is_int(code):
return HttpResponse("El código no es un número")
else:
code = int(code)
if user_exists(code):
return HttpResponse("El usuario ya existe")
name = data.get('name', '')
date = data.get('date', '')
gender = data.get('gender', '')
race = data.get('race', '')
    # create() already saves the new row, so no extra save() call is needed.
    Prisoner.objects.create(
        code=code, name=name, birth_date=date, gender=gender, race=race
    )
return redirect('/')
@login_required(login_url='/login/', redirect_field_name=None)
def search_prisoner(request):
if request.method == 'POST':
form_data = request.POST
user_input = form_data['user_input']
prisoner_list = Prisoner.objects.filter(name__contains=user_input)
template = loader.get_template("prisoner_list/prisoners.html")
context = {
"prisoner_list": prisoner_list
}
return HttpResponse(template.render(context, request))
else:
        return redirect('/')
def user_exists(prisoner_id):
    # .get() raises DoesNotExist for unknown ids; exists() returns False instead.
    return Prisoner.objects.filter(pk=prisoner_id).exists()
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
| null |
prisoner_list/views.py
|
views.py
|
py
| 2,139 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Prisoner.objects.order_by",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Prisoner",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.Prisoner",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects.create",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "models.Prisoner",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects.filter",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "models.Prisoner",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.Prisoner.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Prisoner",
"line_number": 63,
"usage_type": "name"
}
] |
431578205
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# ** Read in the Ecommerce Customers csv file as a DataFrame called customers.**
customers = pd.read_csv('Ecommerce Customers')
# **Check the head of customers, and check out its info() and describe() methods.**
customers.head()
customers.info()
customers.describe()
# ## Exploratory Data Analysis
sns.jointplot(x='Time on Website', y='Yearly Amount Spent', data=customers)
sns.set_style('whitegrid')
sns.jointplot(x='Time on App', y='Yearly Amount Spent', data=customers)
sns.jointplot(x='Time on App', y='Yearly Amount Spent', data=customers, kind='hex')
sns.pairplot(customers)
sns.lmplot(x='Length of Membership', y='Yearly Amount Spent', data=customers)
# ## Training and Testing Data
from sklearn.model_selection import train_test_split
X = customers[['Avg. Session Length', 'Time on App', 'Time on Website', 'Length of Membership']]
y = customers[['Yearly Amount Spent']]
# ** Use model_selection.train_test_split from sklearn to split the data into training and testing sets.**
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# ## Training the Model
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, y_train)
lm.coef_
# ## Predicting Test Data
predict = lm.predict(X_test)
plt.scatter(y_test, predict)
# ## Evaluating the Model
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predict))
print('MSE:', metrics.mean_squared_error(y_test, predict))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predict)))
# ## Residuals
sns.distplot(y_test - predict, bins=50)
# ## Model Coefficients
lm.coef_.transpose()
coeff_df = pd.DataFrame(lm.coef_.transpose(), index=X.columns, columns=['Coefficient'])
coeff_df
| null |
Linear_Regression_ECommerce.py
|
Linear_Regression_ECommerce.py
|
py
| 1,872 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "seaborn.jointplot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "seaborn.jointplot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "seaborn.jointplot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "seaborn.pairplot",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "seaborn.lmplot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_absolute_error",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 62,
"usage_type": "call"
}
] |
57569841
|
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn import svm
import pandas as pd
# Purpose: train an SVM classifier with k-fold cross-validation, then tune its parameters with nested cross-validation (GridSearchCV).
# Load data to pandas' Dataframe.
iris_data = load_iris()
target = pd.DataFrame(data=iris_data.target)
data = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)
# Split train and test data.
x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=0)
# Normalization & Standardization
std = StandardScaler()
x_train = std.fit_transform(x_train)
x_test = std.transform(x_test)
classifier = svm.SVC(C=30, kernel='rbf', gamma=10, decision_function_shape='ovr') # ovr: one-vs-rest
classifier.fit(x_train,y_train.values.ravel())
c_score = cross_val_score(classifier, x_train, y_train.values.ravel(), cv=6)
print("6-flod score:" + str(c_score))
print("6-flod mean_socre:" + str(c_score.mean()))
print("Test_dataset score:", classifier.score(x_train, y_train))
print("Train_dataset score:", classifier.score(x_test, y_test))
# Parameter optimization
# method one : use GridSearchCV (A Nested cross validation method)
classifier = svm.SVC(kernel='rbf', decision_function_shape='ovr') # Generate SVM
params = {"C": range(1, 10), "gamma": range(1, 11)}
gridCv = GridSearchCV(classifier, param_grid=params, cv=6) # 6-fold nested cross-validation
gridCv.fit(x_train, y_train.values.ravel())
print("\nAccuracy :", gridCv.score(x_test, y_test))
print("Best Cross score", gridCv.best_score_)
print("Best modle:", gridCv.best_estimator_)
# method two : Manual calculation
score = []
cross_means = []
for C in range(1, 10):
    for gamma in range(1, 11):
        classifier = svm.SVC(C=C, kernel='rbf', gamma=gamma, decision_function_shape='ovr') # ovr: one-vs-rest strategy
        classifier.fit(x_train, y_train.values.ravel())
        cross_score = cross_val_score(classifier, x_train, y_train.values.ravel(), cv=6) # 6-fold cross-validation
        cross_means.append(cross_score.mean())
        score.append((classifier.score(x_train, y_train), classifier.score(x_test, y_test)))
print("\nAccuracy :", max([x[1] for x in score]))
print("Best Cross score", max(cross_means))
# Finding the parameters behind these maxima takes a little extra bookkeeping
# compared with GridSearchCV, and the two methods' results are close but not
# always identical.
| null |
320180941841-wuyang/homework11/iris_process.py
|
iris_process.py
|
py
| 2,642 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 47,
"usage_type": "call"
}
] |
174409185
|
#
# Usage:
# import runtest
# runtest.run(testcommands, outputfile,
# menuprompt=None, default "OS/161 kernel [? for menu]: "
# shellprompt=None, default "OS/161$ "
# conf=None, default is sys161 default behavior
# ram=None, default is per sys161 config
# cpus=None, default is per sys161 config
# doom=None, default is no doom counter
# progress=30, default is 30 seconds
# timeout=300, default is 300 seconds
# kernel=None) default is "kernel"
#
# Returns None on success or a (string) message if something apparently
# went wrong in the middle. (XXX: should it throw exceptions instead?)
#
# * The testcommands argument is a string containing a list of commands
# separated by semicolons. These can be either kernel menu commands
# or shell commands; the command 's' is recognized for switching from
# the menu to the shell and 'exit' for switching back to the menu.
# (This affects waiting for prompts - running the shell via 'p' or
# crashing out of the shell will confuse things.)
#
# The command 'q' from the menu is also recognized as causing a
# shutdown. This will be done automatically after everything else if
# not issued explicitly.
#
# The following commands are interpreted as macros:
#        MOUNT      expands to "mount sfs lhd1:; cd lhd1:"
#        UNMOUNT    expands to "cd /; unmount lhd1:"
# WAIT sleeps 3 seconds and just presses return
#
# * The outputfile argument should be a python file (e.g. sys.stdout)
# and receives a copy of the System/161 output.
#
# * The menuprompt and shellprompt arguments can be used to change the
# menu and shell prompt strings looked for. For the moment these can
# only be fixed strings, not regular expressions. (This is probably
# easy to improve, but I ran into some mysterious problems when I
# tried, so YMMV.) By default if you pass None prompt strings matching
# what OS/161 issues by default are used.
#
# * The conf argument can be used to supply an alternate sys161.conf
# file. If None is given (the default), sys161 will use its default
# config file.
#
# * The ram and cpus arguments can be used to override the RAM size
# and number-of-cpus settings in the sys161 config file. The number of
# cpus must be an integer, but any RAM size specification understood
# by sys161 can be used. Note: this feature requires System/161 2.0.5
# or higher.
#
# * The doom argument can be used to set the doom counter. If None is
# given (the default) the doom counter is not engaged.
#
# * The progress and timeout arguments can be used to set the timeouts
# for System/161 progress monitoring and pexpect-level global timeout,
# respectively. The defaults (somewhat arbitraily chosen) are 30 and
# 300 seconds. Passing progress=None disables progress monitoring; this
# is necessary for nontrivial tests that run within the kernel, as
# progress monitoring measures userland progress. Passing timeout=None
# probably either disables the global timeout or makes pexpect crash;
# I haven't tested it. I don't recommend trying: it is your defense
# against test runs hanging forever.
#
# Note that no-debugger unattended mode (sys161 -X) is always used.
# The purpose of this script is specifically to support unattended
# test runs...
#
# Depends on pexpect, which you may need to install specifically
# depending on your OS.
#
import time
import pexpect
#
# Macro commands
#
macros = {
"MOUNT" : ["mount sfs lhd1:", "cd lhd1:"],
"UNMOUNT" : ["cd /", "unmount lhd1:"],
# "WAIT" special-cased below
}
#
# Wait for a prompt; returns None if we got it, or a message string if we
# need to bail.
#
def getprompt(proc, prompt):
which = proc.expect_exact([
prompt,
"panic: ", # panic message
"sys161: No progress in ", # sys161 deadman print
"sys161: Elapsed ", # sys161 shutdown print
pexpect.EOF,
pexpect.TIMEOUT
])
if which == 0:
# got the prompt
return None
if which == 1:
proc.expect_exact([pexpect.EOF, pexpect.TIMEOUT])
return "panic"
if which == 2:
proc.expect_exact([pexpect.EOF, pexpect.TIMEOUT])
return "progress timeout"
if which == 3:
proc.expect_exact([pexpect.EOF, pexpect.TIMEOUT])
return "unexpected shutdown"
if which == 4:
return "unexpected end of input"
if which == 5:
return "top-level timeout"
return "runtest: Internal error: pexpect returned out-of-range result"
# end getprompt
#
# main test function
#
def run(testcommands, outputfile,
menuprompt=None, shellprompt=None,
conf=None, ram=None, cpus=None,
doom=None,
progress=30, timeout=300,
kernel=None):
if menuprompt is None:
menuprompt = "OS/161 kernel [? for menu]: "
if shellprompt is None:
shellprompt = "OS/161$ "
if kernel is None:
kernel = "kernel"
args = ["-X"]
if conf is not None:
args.append("-c")
args.append(conf)
if cpus is not None:
args.append("-C")
args.append("31:cpus=%d" % cpus)
if doom is not None:
args.append("-D")
args.append("%d" % doom)
if progress is not None:
args.append("-Z")
args.append("%d" % progress)
if ram is not None:
args.append("-C")
args.append("31:ramsize=%s" % ram)
args.append(kernel)
proc = pexpect.spawn("sys161", args, timeout=timeout,
ignore_sighup=False)
proc.logfile_read = outputfile
commands = [s.strip() for s in testcommands.split(";")]
commands = [macros[c] if c in macros else [c] for c in commands]
# Apparently list flatten() is unpythonic...
commands = [c for sublist in commands for c in sublist]
prompts = { True: shellprompt, False: menuprompt }
inshell = False
quit = False
for cmd in commands:
msg = getprompt(proc, prompts[inshell])
if msg is not None:
return msg
if cmd == "WAIT":
time.sleep(3)
cmd = ""
proc.send("%s\r" % cmd)
if not inshell and cmd == "q":
quit = True
if not inshell and cmd == "s":
inshell = True
if inshell and cmd == "exit":
inshell = False
if not quit:
if inshell:
msg = getprompt(proc, prompts[inshell])
if msg is not None:
return msg
proc.send("exit\r")
inshell = False
msg = getprompt(proc, prompts[inshell])
if msg is not None:
return msg
proc.send("q\r")
quit = True
proc.expect_exact([pexpect.EOF, pexpect.TIMEOUT])
# Apparently if you call pexpect.wait() you must have
# explicitly read all the input, or it hangs; and the process
# can't be already dead, or it crashes. Therefore it appears
# to be entirely useless. I hope not calling it doesn't cause
# zombies to accumulate.
#proc.wait()
return None
# end run
| null |
root/testscripts/runtest.py
|
runtest.py
|
py
| 6,552 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pexpect.EOF",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "pexpect.EOF",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "pexpect.EOF",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "pexpect.EOF",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "pexpect.spawn",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pexpect.EOF",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 192,
"usage_type": "attribute"
}
] |
42089129
|
###########################################
#
# Example script to build a
# pinout diagram. Includes basic
# features and convenience classes.
#
###########################################
from pinout.core import Diagram, Group, Rect, Image
from pinout.components.pinlabel import PinLabelGroup, PinLabel
from pinout.components.type import TextBlock
from pinout.components import leaderline as lline
from pinout.components.legend import Legend
# Import data for the diagram
import data
# Create a new diagram and add a background
diagram = Diagram(1024, 576, "diagram")
diagram.add(Rect(0, 0, 1024, 576, "diagram__bg"))
# Add a stylesheet
diagram.add_stylesheet("styles.css", True)
# Create a layout for diagram
panel_main = diagram.add(Group(2, 2, "panel panel--main"))
panel_main.add(Rect(0, 0, 1020, 438, "panel__bg"))
info_panel = diagram.add(Group(x=2, y=442, tag="panel panel--info"))
info_panel.add(Rect(0, 0, 1020, 132, tag="panel__bg"))
# Create a group to hold the pinout-diagram components.
graphic = panel_main.add(Group(400, 42))
# Add and embed an image
graphic.add(Image("hardware.png", width=220, height=260, embed=True))
# Create a single pin label
graphic.add(
PinLabel(
content="RESET",
x=155,
y=244,
tag="pwr",
body={"x": 117, "y": 30},
leaderline={"direction": "vh"},
)
)
# Create pinlabels on the right header
graphic.add(
PinLabelGroup(
x=206,
y=100,
pin_pitch=(0, 30),
label_start=(60, 0),
label_pitch=(0, 30),
labels=data.right_header,
)
)
# Create pinlabels on the left header
graphic.add(
PinLabelGroup(
x=16,
y=100,
pin_pitch=(0, 30),
label_start=(60, 0),
label_pitch=(0, 30),
scale=(-1, 1),
labels=data.left_header,
)
)
# Create pinlabels on the lower header
graphic.add(
PinLabelGroup(
x=65,
y=244,
scale=(-1, 1),
pin_pitch=(30, 0),
label_start=(110, 30),
label_pitch=(0, 30),
labels=data.lower_header,
leaderline=lline.Curved(direction="vh"),
)
)
# Create a title and a text-block
title_block = info_panel.add(
TextBlock(
data.title,
x=0,
y=0,
width=338,
height=42,
offset=(20, 33),
line_height=18,
tag="panel title_block",
)
)
info_panel.add(
TextBlock(
data.description.split("\n"),
x=0,
y=title_block.y + title_block.height,
width=title_block.width,
height=info_panel.height - title_block.height,
offset=(20, 18),
line_height=18,
tag="panel text_block",
)
)
# Create a legend
legend = info_panel.add(
Legend(
data.legend,
x=338,
y=0,
max_height=132,
)
)
# Export final SVG diagram
diagram.export("quick_start_pinout_diagram.svg", True)
| null |
pinout/resources/quick_start/pinout_diagram.py
|
pinout_diagram.py
|
py
| 2,916 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pinout.core.Diagram",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pinout.core.Rect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pinout.core.Group",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pinout.core.Rect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pinout.core.Group",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pinout.core.Rect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pinout.core.Group",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pinout.core.Image",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pinout.components.pinlabel.PinLabel",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pinout.components.pinlabel.PinLabelGroup",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "data.right_header",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "pinout.components.pinlabel.PinLabelGroup",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "data.left_header",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "pinout.components.pinlabel.PinLabelGroup",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "data.lower_header",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "pinout.components.leaderline.Curved",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pinout.components.leaderline",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "pinout.components.type.TextBlock",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "data.title",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "pinout.components.type.TextBlock",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "data.description.split",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "data.description",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "pinout.components.legend.Legend",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "data.legend",
"line_number": 119,
"usage_type": "attribute"
}
] |
233203497
|
import datetime
from uuid import uuid1
import numpy as np
from backend.models import Video, DjangoUser, VideoRating, UserInformation, UserPreferences, \
VideoRatingPrivacy
from backend.rating_fields import VIDEO_FIELDS
from helpers import login, logout, test_username, create_test_video, \
random_alphanumeric, do_api_call_v2, TIME_WAIT
from selenium.webdriver.common.by import By # noqa: E402
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC # noqa: E402
from selenium.webdriver.support.ui import WebDriverWait # noqa: E402
def test_representative_privacy(driver, django_db_blocker):
with django_db_blocker.unblock():
u = DjangoUser.objects.create(username=random_alphanumeric(), is_active=True)
UserInformation.objects.create(user=u, show_my_profile=False)
u1 = DjangoUser.objects.create(username=random_alphanumeric(), is_active=True)
UserInformation.objects.create(user=u1, show_my_profile=True)
up1 = UserPreferences.objects.create(user=u1)
v = Video.objects.create(video_id=random_alphanumeric(), name="test",
**{f: 10 for f in VIDEO_FIELDS})
VideoRating.objects.create(user=up1, video=v, **{f: 10 for f in VIDEO_FIELDS})
u2 = DjangoUser.objects.create(username=random_alphanumeric(), is_active=True)
UserInformation.objects.create(user=u2, show_my_profile=False)
up2 = UserPreferences.objects.create(user=u2)
VideoRating.objects.create(user=up2, video=v, **{f: 10 for f in VIDEO_FIELDS})
VideoRatingPrivacy.objects.create(video=v, user=up2, is_public=True)
u3 = DjangoUser.objects.create(username=random_alphanumeric(), is_active=True)
UserInformation.objects.create(user=u3, show_my_profile=False)
up3 = UserPreferences.objects.create(user=u3)
VideoRating.objects.create(user=up3, video=v, **{f: 10 for f in VIDEO_FIELDS})
VideoRatingPrivacy.objects.create(video=v, user=up3, is_public=False)
login(driver)
results = do_api_call_v2(driver, '/user_information/public_models/')
results = [x['username'] for x in results['results']]
assert test_username in results
# no videos
assert u.username not in results
# default value
if VideoRatingPrivacy.DEFAULT_VALUE_IS_PUBLIC:
assert u1.username in results
else:
assert u1.username not in results
# public (explicitly)
assert u2.username in results
# private (explicitly)
assert u3.username not in results
# no videos for myself, but allowed to search with own username anyway
assert do_api_call_v2(driver, '/videos/search_tournesol/?search_model=' + test_username,
expect_fail=True).ok
# u1 is either private or public (default value)
if VideoRatingPrivacy.DEFAULT_VALUE_IS_PUBLIC:
r1 = do_api_call_v2(driver,
'/videos/search_tournesol/?reliability=1&search_model=' + u1.username)
assert len(r1['results']) == 1
else:
assert do_api_call_v2(
driver, '/videos/search_tournesol/?reliability=1&search_model=' + u1.username,
expect_fail=True).status_code == 403
# u2 has public videos
r1 = do_api_call_v2(driver,
'/videos/search_tournesol/?reliability=1&search_model=' + u2.username)
assert len(r1['results']) == 1
# u/u3 doesn't have public videos
assert do_api_call_v2(driver, '/videos/search_tournesol/?search_model=' + u.username,
expect_fail=True).status_code == 403
assert do_api_call_v2(driver, '/videos/search_tournesol/?search_model=' + u3.username,
expect_fail=True).status_code == 403
if VideoRatingPrivacy.DEFAULT_VALUE_IS_PUBLIC:
with django_db_blocker.unblock():
VideoRatingPrivacy.objects.create(video=v, user=up1, is_public=False)
assert do_api_call_v2(
driver, '/videos/search_tournesol/?reliability=1&search_model=' + u1.username,
expect_fail=True).status_code == 403
        results = do_api_call_v2(driver, '/user_information/public_models/')
        results = [x['username'] for x in results['results']]
        assert u1.username not in results
else:
with django_db_blocker.unblock():
VideoRatingPrivacy.objects.create(video=v, user=up1, is_public=True)
r1 = do_api_call_v2(driver,
'/videos/search_tournesol/?reliability=1&search_model=' + u1.username)
assert len(r1['results']) == 1
        results = do_api_call_v2(driver, '/user_information/public_models/')
        results = [x['username'] for x in results['results']]
        assert u1.username in results
logout(driver)
with django_db_blocker.unblock():
u.delete()
def select_options(driver, key, value):
"""Select an option in search options."""
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.CLASS_NAME, 'all_search_options')))
labels = driver.find_element_by_class_name('all_search_options').find_elements_by_tag_name(
'label')
label = [x for x in labels if x.text == key][0]
idx = labels.index(label)
select = driver.find_element_by_class_name(
'all_search_options').find_elements_by_tag_name('select')[idx]
select.click()
options = select.find_elements_by_tag_name('option')
option = [x for x in options if x.get_attribute('text') == value][0]
option.click()
def test_representative_search(driver, django_db_blocker):
with django_db_blocker.unblock():
u = DjangoUser.objects.create(username=random_alphanumeric(), is_active=True)
up = UserPreferences.objects.create(user=u)
UserInformation.objects.create(user=u, show_my_profile=True)
video_id = create_test_video()
video = Video.objects.get(video_id=video_id)
ratings = {f: np.random.randn() for f in VIDEO_FIELDS}
VideoRating.objects.create(user=up, video=video, **ratings)
VideoRatingPrivacy.objects.create(user=up, video=video, is_public=True)
login(driver)
ui_button = driver.find_element_by_id('user_interface')
ui_button.click()
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.CLASS_NAME, 'search_options')))
driver.find_element_by_class_name('search_options').click()
inp_model = driver.find_element_by_id('autocomplete_search_model')
inp_model.send_keys(len('Aggregated') * [Keys.BACK_SPACE])
inp_model.send_keys(f"{u.username}'s representative")
driver.find_element_by_class_name('MuiAutocomplete-popper').click()
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'id_search_not_loading')))
print("Loading recommendations")
load_rec_btn = driver.find_element_by_id("load_recommendations")
load_rec_btn.click()
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'id_search_not_loading')))
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.CLASS_NAME, f'video_card_id_{video_id}')))
# now will only see 1 video
assert driver.find_elements_by_class_name(f'video_card_id_{video_id}')
logout(driver)
with django_db_blocker.unblock():
u.delete()
Video.objects.filter(video_id=video_id).delete()
def test_filters(driver, django_db_blocker):
login(driver)
ui_button = driver.find_element_by_id('user_interface')
ui_button.click()
unique_substr = str(uuid1())
# creating videos
with django_db_blocker.unblock():
video_ids = [create_test_video() for _ in range(3)]
videos = [Video.objects.get(video_id=vid) for vid in video_ids]
for v in videos:
v.name = unique_substr + " " + v.name
for f in VIDEO_FIELDS:
setattr(v, f, 1e10)
v.save()
videos[0].duration = datetime.timedelta(minutes=18)
videos[0].views = 9999
videos[0].publication_date = datetime.datetime.now() - datetime.timedelta(days=1)
videos[0].language = 'en'
videos[0].save()
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'search_phrase')))
# setting search field
search_phrase_field = driver.find_element_by_id('search_phrase')
search_phrase_field.clear()
search_phrase_field.send_keys(unique_substr)
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'id_search_not_loading')))
# can get all videos
print("Loading recommendations")
load_rec_btn = driver.find_element_by_id("load_recommendations")
load_rec_btn.click()
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'id_search_not_loading')))
target_classname = f'video_card_id_{video_ids[-1]}'
print(video_ids, video_ids[-1], target_classname)
WebDriverWait(driver, TIME_WAIT * 5).until(
EC.presence_of_element_located((By.CLASS_NAME, target_classname)))
videos_out = [x for video_id in video_ids for x in
driver.find_elements_by_class_name(f'video_card_id_{video_id}')]
print(videos_out)
assert len(videos_out) == len(videos)
# target search options
values = {
'Minimum Duration': '15 min',
'Maximum Duration': '30 min',
'Minimum Number of Views': '0 views',
'Maximum Number of Views': '10k views',
'Publication Date': 'Last month',
'Language': 'English'
}
# opening search options
driver.find_element_by_class_name('search_options').click()
# setting options
for key, value in values.items():
select_options(driver, key, value)
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'id_search_not_loading')))
print("Loading recommendations")
load_rec_btn = driver.find_element_by_id("load_recommendations")
load_rec_btn.click()
WebDriverWait(driver, TIME_WAIT).until(
EC.presence_of_element_located((By.ID, 'id_search_not_loading')))
target_classname = f'video_card_id_{video_ids[0]}'
print(video_ids, target_classname)
WebDriverWait(driver, TIME_WAIT * 5).until(
EC.presence_of_element_located((By.CLASS_NAME, target_classname)))
# now will only see 1 video
videos_out = [x for video_id in video_ids for x in
driver.find_elements_by_class_name(f'video_card_id_{video_id}')]
assert len(videos_out) == 1
logout(driver)
| null |
integration_test/test_search_filter.py
|
test_search_filter.py
|
py
| 10,838 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "backend.models.DjangoUser.objects.create",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "backend.models.DjangoUser.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "backend.models.DjangoUser",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "helpers.random_alphanumeric",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects.create",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserInformation",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "backend.models.DjangoUser.objects.create",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "backend.models.DjangoUser.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "backend.models.DjangoUser",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "helpers.random_alphanumeric",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects.create",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserInformation",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "backend.models.UserPreferences.objects.create",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "backend.models.UserPreferences.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserPreferences",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "backend.models.Video.objects.create",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "backend.models.Video",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "helpers.random_alphanumeric",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "backend.rating_fields.VIDEO_FIELDS",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRating.objects.create",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRating.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRating",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "backend.rating_fields.VIDEO_FIELDS",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "backend.models.DjangoUser.objects.create",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "backend.models.DjangoUser.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "backend.models.DjangoUser",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "helpers.random_alphanumeric",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects.create",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserInformation",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "backend.models.UserPreferences.objects.create",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "backend.models.UserPreferences.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserPreferences",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRating.objects.create",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRating.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRating",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "backend.rating_fields.VIDEO_FIELDS",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects.create",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "backend.models.DjangoUser.objects.create",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "backend.models.DjangoUser.objects",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "backend.models.DjangoUser",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "helpers.random_alphanumeric",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects.create",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserInformation",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "backend.models.UserPreferences.objects.create",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "backend.models.UserPreferences.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserPreferences",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRating.objects.create",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRating.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRating",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "backend.rating_fields.VIDEO_FIELDS",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects.create",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "helpers.login",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "helpers.test_username",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRatingPrivacy.DEFAULT_VALUE_IS_PUBLIC",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "helpers.test_username",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRatingPrivacy.DEFAULT_VALUE_IS_PUBLIC",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.DEFAULT_VALUE_IS_PUBLIC",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects.create",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects.create",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "helpers.do_api_call_v2",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "helpers.logout",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 116,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "backend.models.DjangoUser.objects.create",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "backend.models.DjangoUser.objects",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "backend.models.DjangoUser",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "helpers.random_alphanumeric",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "backend.models.UserPreferences.objects.create",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "backend.models.UserPreferences.objects",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserPreferences",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "backend.models.UserInformation.objects.create",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "backend.models.UserInformation.objects",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "backend.models.UserInformation",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "helpers.create_test_video",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects.get",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "backend.models.Video",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "numpy.random.randn",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "backend.rating_fields.VIDEO_FIELDS",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRating.objects.create",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRating.objects",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRating",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects.create",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "backend.models.VideoRatingPrivacy.objects",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "backend.models.VideoRatingPrivacy",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "helpers.login",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 146,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.BACK_SPACE",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 155,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 162,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 165,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "helpers.logout",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects.filter",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "backend.models.Video",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "helpers.login",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "uuid.uuid1",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "helpers.create_test_video",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects.get",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "backend.models.Video.objects",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "backend.models.Video",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "backend.rating_fields.VIDEO_FIELDS",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 202,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 210,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 218,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 248,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 255,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "helpers.TIME_WAIT",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "helpers.logout",
"line_number": 268,
"usage_type": "call"
}
] |
515509084
|
import torch
import numpy as np
from torch.autograd import Variable
from chinese import chinese
from config import RESULT_FILE, IMPROVED_RESULT_FILE, TEST_GOLD, TEST_FILE, MAX_LEN, BIGRAM, BIGRAM_DIM
class Eval(object):
def __init__(self, da_idx, BATCH_SIZE):
self.da_idx = da_idx
self.BATCH_SIZE = BATCH_SIZE
self.init()
def init(self):
self.improved_P = 0.6
self.improved_R = 0.6
self.improved_F = 0.6
self.P = 0.6
self.R = 0.6
self.F = 0.6
self.test_loss = 100
def proc(self, ans_sentence):
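"""Heuristic post-processing of a segmented sentence: force-split common
single-character function words, then re-merge frequent multi-character
words and year/month date patterns that the split may have broken."""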
for ch in '的了和与就在很也都将你我他她它要这上':
ans_sentence = ans_sentence.replace(ch, ' '+ch+' ')
while '  ' in ans_sentence:
ans_sentence = ans_sentence.replace('  ', ' ')
word_lst = ['了解', '为了', '除了',
'与其', '与否', '参与',
'成就', '就要',
'现在', '正在', '存在', '所在', '在于',
'很多', '很难', '很快',
'即将', '必将', '将来',
'你们', '他们', '其他', '其它', '它们','她们', '我们', '我国', '自我',
'主要', '需要', '要求', '重要', '只要', '还要',
'这里', '这次', '这样', '这种', '这是', '这些', '这个',
'上午', '上年', '上海', '上市', '以上']
for ch in word_lst:
ans_sentence = ans_sentence.replace(ch[0] + ' ' + ch[1:], ' '+ch+' ')
ans_sentence = ans_sentence.replace(ch[:-1] + ' ' + ch[-1], ' '+ch+' ')
ans_sentence = ans_sentence.replace(' '.join(ch), ' '+ch+' ')
for moon in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
'一', '二', '三', '四', '五', '六', '七', '八', '九', '十', '十一', '十二']:
ans_sentence = ans_sentence.replace('年 '+moon+'月', '年'+moon+'月')
ans_sentence = ans_sentence.replace('月 '+moon, '月'+moon)
ans_sentence = ans_sentence.replace(moon + ' 月', moon+'月')
while '  ' in ans_sentence:
ans_sentence = ans_sentence.replace('  ', ' ')
while ' \n' in ans_sentence:
ans_sentence = ans_sentence.replace(' \n', '\n')
return ans_sentence
def deal(self, ans):
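"""Reassemble the output string: list entries are [words, saved digit/Latin
chunks, char offset, original line]; <NUM>/<ENG> placeholders are replaced
with the saved chunks and <OOV> with the original character."""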
ret = ""
for i in range(len(ans)):
if isinstance(ans[i], list):
x = ans[i][0]
num_lst = ans[i][1]
count = ans[i][2]
line = ans[i][3]
xxcount = 0
for ele in x:
if ele != '':
j = 0
while j < len(ele):
if ele[j:j+5] in ['<NUM>', '<ENG>']:
ret += num_lst[xxcount]
count += len(num_lst[xxcount])
xxcount += 1
j += 5
elif ele[j:j+5] == '<OOV>':
ret += line[count]
count += 1
j += 5
else:
ret += ele[j]
count += 1
j += 1
ret += ' '
else:
ret += ans[i]
return ret
def transform(self, ans, NPMT):
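"""Run the segmentation model over the list chunks of `ans` in batches,
writing the predicted word lists back in place. Chunks are sorted by length
(descending) so padding per batch stays small; per-character loss is
accumulated into self.test_loss."""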
to_do_lst = []
loss = 0
loss_len = 0
for i in range(len(ans)):
ele = ans[i]
if isinstance(ele, list):
if len(ele[0]) <= 2:
ele[0] = [''.join(ele[0])]
else:
to_do_lst.append(i)
if len(to_do_lst) == self.BATCH_SIZE or (i == len(ans)-1 and len(to_do_lst) > 0):
to_do_lst.sort(key=lambda x: len(ans[x][0]), reverse=True)
len_lst = [len(ans[j][0]) for j in to_do_lst]
maxlen = max(len_lst)
x_batch = []
bi_x_batch = []
length_batch = []
for j in to_do_lst:
words = ans[j][0][:]
length = len(words)
bi_idx, uni_idx = self.da_idx.to_bi_index(words)
uni_idx = uni_idx + [0]*(maxlen - len(ans[j][0]))
bi_idx = bi_idx + [0]*(maxlen - len(ans[j][0]))
x_batch.append(uni_idx)
bi_x_batch.append(bi_idx)
length_batch.append(len(ans[j][0]))
for k in range(len(to_do_lst)):
loss_len += length_batch[k]
x_batch = np.asarray(x_batch, dtype=np.int32)
bi_x_batch = np.asarray(bi_x_batch, dtype=np.int32)
length_batch = np.asarray(length_batch, dtype=np.int32)
x = Variable(torch.LongTensor(x_batch).cuda(), volatile = True).view(len(to_do_lst), -1)
if BIGRAM:
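# BIGRAM_DIM is assumed to live in config alongside BIGRAM; VOCABS (a
# pretrained bigram-embedding table) is assumed to be supplied by the
# surrounding project, as neither is defined in this file.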
bi_x = VOCABS.bi_vectors[bi_x_batch]
bi_x = Variable(torch.Tensor(bi_x).cuda(), volatile = True).view(len(to_do_lst), -1,BIGRAM_DIM)
else:
bi_x = None
length = Variable(torch.LongTensor(length_batch).cuda(), volatile = True).view(-1)
ret = NPMT(x, length)
loss += float(ret[0])
for j, outstr in zip(to_do_lst, ret[1]):
ans[j][0] = outstr.split('|')
to_do_lst = []
self.test_loss = loss / loss_len
def test_process(self, NPMT):
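"""Segment TEST_FILE: each line is cut into chunks at punctuation or at
MAX_LEN characters, with digit/Latin runs replaced by <NUM>/<ENG>
placeholders; transform() predicts the words and deal() writes the
restored text to RESULT_FILE."""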
NPMT.eval()
ans = []
for line in open(TEST_FILE, 'r').readlines():
line = line.strip()
sentence = []
num_lst = []
curr_num = ""
count = 0
start = 0
for i, ch in enumerate(line):
if len(sentence) >= MAX_LEN:
if curr_num != "":
continue
ans.append([sentence[:], num_lst[:], start, line])
start = i
num_lst = []
sentence = []
if chinese(ch) not in ['<PUNC>', '<NUM>', '<ENG>']:
if curr_num != "":
num_lst.append(curr_num)
curr_num = ""
sentence.append(ch)
elif chinese(ch) in ['<NUM>', '<ENG>']:
if curr_num == '':
sentence.append(chinese(ch))
curr_num += ch
elif chinese(ch) == '<PUNC>':
if curr_num != "":
num_lst.append(curr_num)
curr_num = ''
if len(sentence) > 0:
ans.append([sentence[:], num_lst[:], start, line])
ans.append(ch + ' ')
num_lst = []
sentence = []
start = i + 1
if len(sentence) > 0:
if curr_num != "":
num_lst.append(curr_num)
curr_num = ''
ans.append([sentence[:], num_lst[:], start, line])
ans.append(' ')
ans.append('\n')
self.transform(ans, NPMT)
fout = open(RESULT_FILE + '.txt', 'w')
fout.write(self.deal(ans))
fout.close()
def improved_eval_process(self):
improved = open(IMPROVED_RESULT_FILE + '.txt', 'w')
my_ans = open(RESULT_FILE + '.txt', 'r').readlines()[:-1]
gold_ans = open(TEST_GOLD, 'r').readlines()[:len(my_ans)]
pred_seg = 0
ans_seg = 0
common_seg = 0
for ans_sentence, pred_sentence in zip(my_ans, gold_ans):
ans_sentence = self.proc(ans_sentence)
improved.write(ans_sentence)
ans = []
for word in ans_sentence.strip().split(' '):
for i in range(len(word) - 1):
ans.append(-1)
if word != '':
ans.append(word)
pred = []
for word in pred_sentence.strip().split(' '):
for i in range(len(word) - 1):
pred.append(-1)
pred.append(word)
for ans_word, pred_word in zip(ans, pred):
if pred_word != -1:
pred_seg+=1
if ans_word != -1:
ans_seg+=1
if ans_word != -1 and pred_word == ans_word:
common_seg+=1
print(ans_seg, pred_seg, common_seg)
self.improved_P = common_seg/ans_seg
self.improved_R = common_seg/pred_seg
self.improved_F = 2/(1/self.improved_P +1/self.improved_R)
improved.close()
print(self.improved_P, self.improved_R, self.improved_F)
def eval_process(self):
my_ans = open(RESULT_FILE + '.txt', 'r').readlines()[:-1]
gold_ans = open(TEST_GOLD, 'r').readlines()[:len(my_ans)]
pred_seg = 0
ans_seg = 0
common_seg = 0
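# Character-level alignment trick: each word contributes len(word)-1 filler
# slots (-1) followed by the word itself, so zip() lines up words that end at
# the same character offset. Note that `ans` holds the system output and
# `pred` the gold segmentation, so P below is precision and R is recall.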
for ans_sentence, pred_sentence in zip(my_ans, gold_ans):
ans = []
for word in ans_sentence.strip().split(' '):
for i in range(len(word) - 1):
ans.append(-1)
if word != '':
ans.append(word)
pred = []
for word in pred_sentence.strip().split(' '):
for i in range(len(word) - 1):
pred.append(-1)
pred.append(word)
for ans_word, pred_word in zip(ans, pred):
if pred_word != -1:
pred_seg+=1
if ans_word != -1:
ans_seg+=1
if ans_word != -1 and pred_word == ans_word:
common_seg+=1
print(ans_seg, pred_seg, common_seg)
self.P = common_seg/ans_seg
self.R = common_seg/pred_seg
self.F = 2/(1/self.P+1/self.R)
print(self.P, self.R,self.F)
| null |
evaluate.py
|
evaluate.py
|
py
| 10,496 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.asarray",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "config.BIGRAM",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "config.TEST_FILE",
"line_number": 144,
"usage_type": "argument"
},
{
"api_name": "config.MAX_LEN",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "chinese.chinese",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "chinese.chinese",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "chinese.chinese",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "chinese.chinese",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "config.RESULT_FILE",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "config.IMPROVED_RESULT_FILE",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "config.RESULT_FILE",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "config.TEST_GOLD",
"line_number": 196,
"usage_type": "argument"
},
{
"api_name": "config.RESULT_FILE",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "config.TEST_GOLD",
"line_number": 232,
"usage_type": "argument"
}
] |
99516730
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from flask import Flask, render_template, request, redirect, session, g, flash, url_for
import sqlite3
import validators
import os
import requests
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'bookcatalogue.db'),
SECRET_KEY='secret key'
))
# In[ ]:
def connect_db():
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
# In[ ]:
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
# In[ ]:
@app.route('/')
def hello_world():
return redirect('/login')
# In[ ]:
# User must log in
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
db = get_db()
cur = db.execute('select id from users where username=? and password=?', [request.form['username'], request.form['password']])
validUser = cur.fetchone()
if validUser:
session['logged_in'] = True
session['user_id'] = validUser[0]
flash('You are logged in')
return redirect(url_for('show_books'))
else:
session['logged_in'] = False
error = 'Invalid username or password'
return render_template('login.html', error=error)
# In[ ]:
@app.route('/showbooks', methods=['GET'])
def show_books():
if not session.get('logged_in'):
return redirect(url_for('login'))
error = None
db = get_db()
cur = db.execute('select user_id, title, author, page_count, average_rating, thumbnail from bookcatalogue where user_id=?', [session['user_id']])
books = cur.fetchall()
return render_template('showbooks.html', books=books)
# In[ ]:
@app.route('/searchbooks', methods=['GET', 'POST'])
def search_books():
if not session.get('logged_in'):
return redirect(url_for('login'))
error = None
if request.method == 'POST':
# check google for search results
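# The volumes response is assumed to have the shape
#   {"items": [{"volumeInfo": {"title": ..., "authors": [...], "pageCount": ...,
#               "averageRating": ..., "imageLinks": {"thumbnail": ...}}}, ...]};
# a query with no matches returns no "items" key, which would raise KeyError below.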
r = requests.get('https://www.googleapis.com/books/v1/volumes?q=isbn:' + request.form['isbnnumber'])
json = r.json()
searchresults = []
for item in json['items']:
result = {}
result['title'] = item['volumeInfo']['title']
# check for authors
if 'authors' in item['volumeInfo'].keys():
result['author'] = item['volumeInfo']['authors'][0]
else:
result['author'] = 'Author not found'
# check for pageCount
if 'pageCount' in item['volumeInfo'].keys():
result['pageCount'] = item['volumeInfo']['pageCount']
else:
result['pageCount'] = 'Page count not found'
# check for averageRating
if 'averageRating' in item['volumeInfo'].keys():
result['averageRating'] = item['volumeInfo']['averageRating']
else:
result['averageRating'] = 'Average rating not found'
# check for thumbnail
if 'imageLinks' in item['volumeInfo'].keys():
result['thumbnail'] = item['volumeInfo']['imageLinks']['thumbnail']
else:
result['thumbnail'] = 'Thumbnail not found'
searchresults.append(result)
return render_template('searchbooks.html', searchresults=searchresults)
return render_template('searchbooks.html')
# In[ ]:
@app.route('/searchbooksbytitle', methods=['POST'])
def search_books_by_title():
if not session.get('logged_in'):
return redirect(url_for('login'))
error = None
# check google for search results
r = requests.get('https://www.googleapis.com/books/v1/volumes?q=intitle:' + request.form['searchtitle'])
json = r.json()
searchresults = []
for item in json['items']:
result = {}
result['title'] = item['volumeInfo']['title']
# check for authors
if 'authors' in item['volumeInfo'].keys():
result['author'] = item['volumeInfo']['authors'][0]
else:
result['author'] = 'Author not found'
# check for pageCount
if 'pageCount' in item['volumeInfo'].keys():
result['pageCount'] = item['volumeInfo']['pageCount']
else:
result['pageCount'] = 'Page count not found'
# check for averageRating
if 'averageRating' in item['volumeInfo'].keys():
result['averageRating'] = item['volumeInfo']['averageRating']
else:
result['averageRating'] = 'Average rating not found'
# check for thumbnail
if 'imageLinks' in item['volumeInfo'].keys():
result['thumbnail'] = item['volumeInfo']['imageLinks']['thumbnail']
else:
result['thumbnail'] = 'Thumbnail not found'
searchresults.append(result)
return render_template('searchbooks.html', searchresults=searchresults)
# In[ ]:
@app.route('/addbook', methods=['GET'])
def add_book():
if not session.get('logged_in'):
return redirect(url_for('login'))
error = None
db = get_db()
cur = db.execute('insert into bookcatalogue (user_id, title, author, page_count, average_rating, thumbnail) values (?, ?, ?, ?, ?, ?)', [session['user_id'], request.args['title'], request.args['author'], request.args['pageCount'], request.args['averageRating'], request.args['thumbnail']])
db.commit()
return redirect(url_for('show_books'))
# In[ ]:
@app.route('/deletebook', methods=['GET'])
def delete_book():
if not session.get('logged_in'):
return redirect(url_for('login'))
error = None
db = get_db()
cur = db.execute('delete from bookcatalogue where user_id=? and title=? and author=? and page_count=? and average_rating=? and thumbnail=?', [session['user_id'], request.args['title'], request.args['author'], request.args['pageCount'], request.args['averageRating'], request.args['thumbnail']])
db.commit()
return redirect(url_for('show_books'))
# In[ ]:
if __name__ == '__main__':
app.run()
# In[ ]:
| null |
bookcatalogue.py
|
bookcatalogue.py
|
py
| 6,120 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "flask.g.sqlite_db",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "flask.g.sqlite_db",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 189,
"usage_type": "call"
}
] |
251233460
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime
from typing import Text
import pytest
import six
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
def _version(): # type: () -> Text
try:
return six.text_type(get_distribution(__name__).version)
except DistributionNotFound: # pragma: no cover
return '0.0.0-dev' # pragma: no cover
#: Semantic Version of the module.
__version__ = _version()
# noinspection SpellCheckingInspection
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--print-relative-time',
action='store_true',
dest="pytest_print_relative_time",
default=False,
help="Time in milliseconds when the print was invoked,"
" relative to the time the fixture was created.")
# noinspection SpellCheckingInspection
@pytest.fixture
def printer(request):
"""pytest plugin to print test progress steps in verbose mode"""
# noinspection PyUnusedLocal
def no_op(*args):
"""do nothing"""
if request.config.getoption('verbose') <= 0:
return no_op
terminal_reporter = request.config.pluginmanager.getplugin('terminalreporter')
if terminal_reporter is None:
return no_op # pragma: no cover
print_relative_time = request.config.getoption('pytest_print_relative_time')
first_call = [True]
start_datetime = datetime.now()
def _print(msg):
if first_call[0]: # in case of the first call we don't have a new empty line, print it
terminal_reporter.write('\n')
first_call[0] = False
terminal_reporter.write('\t')
if print_relative_time:
delta = datetime.now() - start_datetime
terminal_reporter.write(str(delta.total_seconds()))
terminal_reporter.write('\t')
terminal_reporter.write(msg)
terminal_reporter.write('\n')
return _print
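# Minimal usage sketch (illustrative test, run with `pytest -v`):
#
#     def test_checkout(printer):
#         printer("basket created")
#         printer("payment accepted")
#
# Each call prints an indented progress line; with --print-relative-time the
# seconds elapsed since the fixture was created are prepended.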
| null |
src/pytest_print/__init__.py
|
__init__.py
|
py
| 2,102 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "six.text_type",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pkg_resources.get_distribution",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pkg_resources.DistributionNotFound",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 37,
"usage_type": "attribute"
}
] |
301673029
|
import discord
import logging
from self_chat import chatbot_response_b
from random import randint
client = discord.Client()
step=0
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
global step
# Ignore the bot's own messages first, so a reply that itself starts with '*'
# cannot trigger an endless reply loop.
if message.author == client.user:
return
if message.content.startswith('*'):
to_send = chatbot_response_b(step=step, user=message.content)
print(to_send)
try:
await message.channel.send(to_send)
except Exception:
await message.channel.send("no response...")
if message.content.startswith('$'):
if message.content == '$spam':
pass
print(message.content)
client.run("DISCORD BOT SECRET TOKEN")
| null |
Discord Bot NLP/bot.py
|
bot.py
|
py
| 777 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.Client",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "self_chat.chatbot_response_b",
"line_number": 17,
"usage_type": "call"
}
] |
413982278
|
from django.contrib.auth import authenticate
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from chat.models import Chat
from django.contrib.auth.models import User
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from chat.serializers import ChatSerializer
# Create your views here.
def login(request):
return render(request, 'login.html')
def login_process(request):
if request.method == 'POST':
user_name = request.POST['name']
user_pwd = request.POST['pwd']
print(user_name)
user = authenticate(username=user_name, password=user_pwd)
if user:
request.session['username'] = user_name
all_users = User.objects.all()
print("======", all_users)
# return HttpResponse ("login sucessfully"+request.session['username'])
return render(request,'login_suc.html',{"all_users" :all_users})
else:
return render(request, 'login.html', {"message":'please enter correct username and password'})
return HttpResponse("added data")
def register(request):
return render(request, 'register.html')
def register_process(request):
if request.method == 'POST':
user_name = request.POST['name']
user_email = request.POST['email']  # 'email' form field assumed to exist in register.html
# user_mobile = request.POST['mobile']
user_pwd = request.POST['pwd']
print(user_name, user_email)
user = User.objects.create_user(user_name,user_email,user_pwd)
return HttpResponseRedirect('/base/login')
def chat_form(request, id):
user_obj = User.objects.get(id = id)
from_message = request.session['username']
return render(request, 'chat_forms.html', {"user_name": user_obj.username, 'from_message': from_message,
'id':user_obj.id})
def chat(request,id):
username = User.objects.get(id = id).username
from_message = request.session['username']
message =request.POST.get('message')
chat_obj = Chat.objects.create(from_message = from_message, to_message = username, message = message)
from_all_message = Chat.objects.filter(from_message = from_message, to_message = username).values()
print("=================", len(from_all_message))
messages = []
timestamp = []
for i in range(0,len(from_all_message)):
msg = from_all_message[i]['message']
time = from_all_message[i]['send_time']
messages.append(msg)
timestamp.append(time)
print("=================",(messages))
return render(request,'chat_details.html', {"user_name": username , 'from_message':from_message,
"message":messages, 'timestap': timestamp})
class Api(generics.ListCreateAPIView):
model = Chat
queryset = Chat.objects.all()
serializer_class = ChatSerializer
def post(self, request, *args, **kwargs):
user = request.user.id
##request.data.get('user_id', None)
print(user)
if not user:
user = 1
logs_obj = Chat(from_message=request.data.get('from_message', True),
to_message=request.data.get('to_message', True),
message= request.data.get('message', True))
logs_obj.save()
d = ChatSerializer(logs_obj, context={'request': request}).data
return Response(d, status=status.HTTP_201_CREATED)
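# Example POST body for this endpoint (field names from the Chat model):
#   {"from_message": "alice", "to_message": "bob", "message": "hi"}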
| null |
chat/views.py
|
views.py
|
py
| 3,554 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.all",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "chat.models.Chat.objects.create",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "chat.models.Chat.objects",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Chat",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "chat.models.Chat.objects.filter",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "chat.models.Chat.objects",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Chat",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "chat.models.Chat",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "chat.models.Chat.objects.all",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "chat.models.Chat.objects",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "chat.models.Chat",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "chat.serializers.ChatSerializer",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "chat.models.Chat",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "chat.serializers.ChatSerializer",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 92,
"usage_type": "name"
}
] |
611315447
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from tristan_funcs_vecpot import vecpot2
import h5py
from scipy.ndimage.filters import gaussian_filter
plt.set_cmap('RdBu')
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams.update({'font.size': 15})
t=47
t_str = '%03d' % t
#fldbase = "../../tristan-mp_reconnection/16k_triggered_finals/sig.3/delgam00005/output/flds.tot."
#fldbase = "../../tristan_acc-mec_Ez/8k_bguide.3_triggered_stride1_thresh2/output/flds.tot."
fldbase = "../../tristan_acc-mec_Ez/8k_untriggered_bguide.3/output/flds.tot."
#fldbase = "../../tristan_acc-mec_Ez/8k_bguide0_untriggered_stride1_thresh2/output/flds.tot."
fldbase += t_str
myfld = h5py.File(fldbase,'r')
dens = myfld['dens'][0,:,:]
bdens = myfld['bdens'][0,:,:]
edge = 5
smoothlen = 1
vecpot = vecpot2(myfld,12,3)[0]#[edge:-1*edge,:] #clip off top and bottom to remove numerical issue at boundary with vector potential calc
vecpot = vecpot[edge:-1*edge,:]
vecpot_smooth = gaussian_filter(vecpot,smoothlen)
#just getting bdens to be the same shape as vecpot
mx = bdens.shape[1]
bdens = bdens[:,2:mx-8]
bdens = bdens[edge:-1*edge,:]
print(np.shape(vecpot),np.shape(bdens))
xhlf = np.shape(vecpot)[1]/2
#print(xhlf)
#vecpot_slice = vecpot[:,xhlf]
#vecpot_smooth_slice = vecpot_smooth[:,xhlf]
#plt.plot(vecpot_slice,color="Red")
#plt.plot(vecpot_smooth_slice,color="Blue")
#plt.savefig('vecpot_slices_smooth.png')
#looks decent now let's do the differencing
#0 axis is up along y
#1 axis is in x-direction
#basic central difference:
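#central difference: f'(x) ~ (f(x+h) - f(x-h)) / (2h) with h = myroll cells;
#the first differences below drop the factor of 2, which is harmless because
#only the locations of the zero crossings matter, not the magnitudes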
myroll = 2
dady = (np.roll(vecpot_smooth,myroll,axis=0) -np.roll(vecpot_smooth,-1*myroll,axis=0))/myroll
dadx = (np.roll(vecpot_smooth,myroll,axis=1) - np.roll(vecpot_smooth,-1*myroll,axis=1))/myroll
d2ad2x = (np.roll(vecpot_smooth,myroll,axis=1)-2*vecpot_smooth+np.roll(vecpot_smooth,-1*myroll,axis=1))/myroll**2
d2ad2y = (np.roll(vecpot_smooth,myroll,axis=0)-2*vecpot_smooth+np.roll(vecpot_smooth,-1*myroll,axis=0))/myroll**2
d2adydx = (np.roll(dady,myroll,axis=1) - np.roll(dady,-1*myroll,axis=1))/myroll
d2adxdy = (np.roll(dadx,myroll,axis=0) - np.roll(dadx,-1*myroll,axis=0))/myroll
#first_deriv_tot = dady+dadx
#first_deriv_tot = first_deriv_tot[:,smoothlen+1:-smoothlen-2] #cut off stuff from rolling at end to avoid analyzing nonsense
dady = dady[:,smoothlen+1:-smoothlen-2]
dadx = dadx[:,smoothlen+1:-smoothlen-2]
d2ad2x = d2ad2x[:,smoothlen+1:-smoothlen-2]
d2ad2y = d2ad2y[:,smoothlen+1:-smoothlen-2]
d2adydx = d2adydx[:,smoothlen+1:-smoothlen-2]
d2adxdy = d2adxdy[:,smoothlen+1:-smoothlen-2]
#so first we're gonna have to loop through the first derivative and identify zeros.
bdens = bdens[:,smoothlen+1:-smoothlen-2]
convfac = 12/3./1000
#yext = np.shape(dens)[1]*convfac
yext = 2.7
mycut = 125
xext = (np.shape(dens)[0]-2*mycut)*convfac
#xext = (np.shape(dens)[1]-200)*convfac
print('dens shape ',np.shape(dens))
bdens_tolerence = 4
zero_tolerence = 2
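#candidate null points: |dA/dx| and |dA/dy| both within zero_tolerence of
#zero, restricted to cells where bdens stays below bdens_tolerence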
bdens_loc = bdens<bdens_tolerence
dadx_zero_loc = np.abs(dadx)<zero_tolerence
dady_zero_loc = np.abs(dady)<zero_tolerence
first_deriv_zero_loc = dadx_zero_loc*dady_zero_loc*bdens_loc
#first_deriv_zero_loc = np.abs(first_deriv_tot)<zero_tolerence #locations where the first derivative is sufficiently close to zero (sufficiently being controlled by parameter "zero tolerence")
num_deriv_zero = np.sum(first_deriv_zero_loc)
print('number of zero derivative points : ',num_deriv_zero)
#now we loop through null points and ask the questions about 2nd derivatives
ylen,xlen = np.shape(dady)
#print(xlen,ylen)
#testarr = np.zeros((ylen,xlen))
#print(np.shape(testarr))
saddlepoint_arr = np.zeros((ylen,xlen))
saddle_count = 0
xpoint_loc_list = []
for i in range(ylen):
for j in range(xlen):
#only need to do operations if its a null point
if first_deriv_zero_loc[i][j]==1:
#testarr[i][j] += 1 #indexing is correct
H00 = d2ad2x[i][j] #values of the hessian matrix
H11 = d2ad2y[i][j]
H01 = d2adydx[i][j]
#coeffs of the quadratic (characteristic) equation for the Hessian eigenvals;
#note b carries the opposite sign of the trace here, which flips the sign of
#both eigenvalues but leaves their product (= c, the determinant tested
#below) unchanged
a=1
b=H00+H11
c=H00*H11 - H01**2
#eigenvals
lambdaplus = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
lambdaminus = (-b - np.sqrt(b**2 -4*a*c))/(2*a)
#print('eigenvals : ',lambdaplus,lambdaminus)
#print('eigenvec : ',-b,a-lambdaplus)
if lambdaplus*lambdaminus < 0: #if they have opposite signs
#if True:
xpoint_loc_list.append([i,j])
saddle_count += 1
saddlepoint_arr[i][j]+=1
print('saddle point count : ',saddle_count)
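#Hedged equivalent check (editor's addition, not part of the original script):
#only the sign of the eigenvalue product matters, and that product equals the
#Hessian determinant, so the loop body above could test it directly:
#  detH = d2ad2x[i][j]*d2ad2y[i][j] - d2adydx[i][j]**2
#  if detH < 0: ...saddle (X-) point...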
plt.set_cmap('viridis')
#testing various quantities along the way
fig, ax0 = plt.subplots(1,1)#,sharey=True)
ax0.imshow(np.rot90(dens/4.)[mycut:-mycut,:],origin='lower',vmin=0,vmax=5,extent=[-yext/2, yext/2, -xext/2,xext/2])
for i in range(saddle_count):
#ax0.scatter((xpoint_loc_list[i][1]+4)*convfac - xext/2,(xpoint_loc_list[i][0]+1)*convfac - yext/2,marker='x',s=10,color="Red")
ax0.scatter((xpoint_loc_list[i][0]+2)*convfac - yext/2,-((xpoint_loc_list[i][1]+4)*convfac - xext/2)+.2, marker='x',s=20,color="Red")
ax0.set_xlabel('$x \; (1000 \; c/\omega_{p})$')
ax0.set_ylabel('$y \; (1000 \; c/\omega_{p})$')
#ax0.set_title('$\\frac{\partial A_{z}}{\partial x} + \\frac{\partial A_{z}}{\partial y}$')
#ax1.imshow(saddlepoint_arr,origin='lower')
#ax1.set_title('Saddle Points')
#plt.imshow(first_deriv_tot,origin='lower',vmin=-150,vmax=150)#,vmin=-5,vmax=5)
#plt.colorbar()
plt.savefig('xpoint_example.pdf',dpi=300,bbox_inches='tight')
plt.savefig('xpoint_example.png',dpi=300,bbox_inches='tight')
#plt.imshow(vecpot_smooth,origin='lower')
#plt.savefig('vecpot_smoothed.png',dpi=300,bbox_inches='tight')
#plt.close()
#plt.imshow(vecpot,origin='lower')
#plt.savefig('vecpot_orig.png',dpi=300,bbox_inches='tight')
#plt.close()
myfld.close()
| null |
smooth_xpoint_detect_rot90.py
|
smooth_xpoint_detect_rot90.py
|
py
| 6,016 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.set_cmap",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tristan_funcs_vecpot.vecpot2",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters.gaussian_filter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.set_cmap",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "numpy.rot90",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 188,
"usage_type": "name"
}
] |
264163053
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt
from frappe import _
from frappe.model.document import Document
from operator import itemgetter
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class FormisBOM(Document):
def autoname(self):
names = frappe.db.sql_list("""select name from `tabFormisBOM` where item=%s""", self.item)
if names:
# name can be BOM/ITEM/001, BOM/ITEM/001-1, BOM-ITEM-001, BOM-ITEM-001-1
# split by item
names = [name.split(self.item)[-1][1:] for name in names]
# split by (-) if cancelled
names = [cint(name.split('-')[-1]) for name in names]
idx = max(names) + 1
else:
idx = 1
self.name = 'BOM-' + self.item + ('-%.3i' % idx)
def validate(self):
self.clear_operations()
self.validate_main_item()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "qty", "BOM Item")
self.validate_materials()
self.set_bom_material_details()
self.validate_operations()
self.calculate_cost()
def on_update(self):
self.check_recursion()
self.update_exploded_items()
def on_submit(self):
self.manage_default_bom()
def on_cancel(self):
frappe.db.set(self, "is_active", 0)
frappe.db.set(self, "is_default", 0)
# check if used in any other bom
self.validate_bom_links()
self.manage_default_bom()
def on_update_after_submit(self):
self.validate_bom_links()
self.manage_default_bom()
def get_item_det(self, item_code):
item = frappe.db.sql("""select name, item_name, docstatus, description, image,
is_sub_contracted_item, stock_uom, default_bom, last_purchase_rate
from `tabItem` where name=%s""", item_code, as_dict = 1)
if not item:
frappe.throw(_("Item: {0} does not exist in the system").format(item_code))
return item
def validate_rm_item(self, item):
if item[0]['name'] == self.item:
frappe.throw(_("Raw material cannot be same as main Item"))
def set_bom_material_details(self):
for item in self.get("items"):
ret = self.get_bom_material_detail({"item_code": item.item_code, "item_name": item.item_name, "bom_no": item.bom_no,
"qty": item.qty})
for r in ret:
if not item.get(r):
item.set(r, ret[r])
def get_bom_material_detail(self, args=None):
""" Get raw material details like uom, desc and rate"""
if not args:
args = frappe.form_dict.get('args')
if isinstance(args, basestring):
import json
args = json.loads(args)
item = self.get_item_det(args['item_code'])
self.validate_rm_item(item)
args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or ''
args.update(item[0])
rate = self.get_rm_rate(args)
ret_item = {
'item_name' : item and args['item_name'] or '',
'description' : item and args['description'] or '',
'image' : item and args['image'] or '',
'stock_uom' : item and args['stock_uom'] or '',
'bom_no' : args['bom_no'],
'rate' : rate
}
return ret_item
def get_rm_rate(self, arg):
""" Get raw material rate as per selected method, if bom exists takes bom cost """
rate = 0
if arg['bom_no']:
rate = self.get_bom_unitcost(arg['bom_no'])
elif arg:
if self.rm_cost_as_per == 'Valuation Rate':
rate = self.get_valuation_rate(arg)
elif self.rm_cost_as_per == 'Last Purchase Rate':
rate = arg['last_purchase_rate']
elif self.rm_cost_as_per == "Price List":
if not self.buying_price_list:
frappe.throw(_("Please select Price List"))
rate = frappe.db.get_value("Item Price", {"price_list": self.buying_price_list,
"item_code": arg["item_code"]}, "price_list_rate") or 0
return rate
def update_cost(self):
if self.docstatus == 2:
return
for d in self.get("items"):
rate = self.get_bom_material_detail({'item_code': d.item_code, 'bom_no': d.bom_no,
'qty': d.qty})["rate"]
if rate:
d.rate = rate
if self.docstatus == 1:
self.flags.ignore_validate_update_after_submit = True
self.calculate_cost()
self.save()
self.update_exploded_items()
frappe.msgprint(_("Cost Updated"))
def get_bom_unitcost(self, bom_no):
bom = frappe.db.sql("""select name, total_cost/quantity as unit_cost from `tabBOM`
where is_active = 1 and name = %s""", bom_no, as_dict=1)
return bom and bom[0]['unit_cost'] or 0
def get_valuation_rate(self, args):
""" Get weighted average of valuation rate from all warehouses """
total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0
for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin`
where item_code=%s""", args['item_code'], as_dict=1):
total_qty += flt(d.actual_qty)
total_value += flt(d.stock_value)
if total_qty:
valuation_rate = total_value / total_qty
if valuation_rate <= 0:
last_valuation_rate = frappe.db.sql("""select valuation_rate
from `tabStock Ledger Entry`
where item_code = %s and valuation_rate > 0
order by posting_date desc, posting_time desc, name desc limit 1""", args['item_code'])
valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0
return valuation_rate
def manage_default_bom(self):
""" Uncheck others if current one is selected as default,
update default bom in item master
"""
if self.is_default and self.is_active:
from frappe.model.utils import set_default
set_default(self, "item")
item = frappe.get_doc("Item", self.item)
if item.default_bom != self.name:
item.default_bom = self.name
item.save(ignore_permissions = True)
else:
frappe.db.set(self, "is_default", 0)
item = frappe.get_doc("Item", self.item)
if item.default_bom == self.name:
item.default_bom = None
item.save(ignore_permissions = True)
def clear_operations(self):
if not self.with_operations:
self.set('operations', [])
def validate_main_item(self):
""" Validate main FG item"""
item = self.get_item_det(self.item)
if not item:
frappe.throw(_("Item {0} does not exist in the system or has expired").format(self.item))
else:
ret = frappe.db.get_value("Item", self.item, ["description", "stock_uom", "item_name"])
self.description = ret[0]
self.uom = ret[1]
self.item_name= ret[2]
if not self.quantity:
frappe.throw(_("Quantity should be greater than 0"))
def validate_materials(self):
""" Validate raw material entries """
if not self.get('items'):
frappe.throw(_("Raw Materials cannot be blank."))
check_list = []
for m in self.get('items'):
if m.bom_no:
validate_bom_no(m.item_code, m.bom_no)
if flt(m.qty) <= 0:
frappe.throw(_("Quantity required for Item {0} in row {1}").format(m.item_code, m.idx))
check_list.append(cstr(m.item_code))
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list):
frappe.throw(_("Same item has been entered multiple times."))
def check_recursion(self):
""" Check whether recursion occurs in any bom"""
check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']]
for d in check_list:
bom_list, count = [self.name], 0
while (len(bom_list) > count ):
boms = frappe.db.sql(" select %s from `tabBOM Item` where %s = %s " %
(d[0], d[1], '%s'), cstr(bom_list[count]))
count = count + 1
for b in boms:
if b[0] == self.name:
frappe.throw(_("BOM recursion: {0} cannot be parent or child of {2}").format(b[0], self.name))
if b[0]:
bom_list.append(b[0])
def update_cost_and_exploded_items(self, bom_list=[]):
bom_list = self.traverse_tree(bom_list)
for bom in bom_list:
bom_obj = frappe.get_doc("FormisBOM", bom)
bom_obj.on_update()
return bom_list
def traverse_tree(self, bom_list=[]):
def _get_children(bom_no):
return [cstr(d[0]) for d in frappe.db.sql("""select bom_no from `tabBOM Item`
where parent = %s and ifnull(bom_no, '') != ''""", bom_no)]
count = 0
if self.name not in bom_list:
bom_list.append(self.name)
while(count < len(bom_list)):
for child_bom in _get_children(bom_list[count]):
if child_bom not in bom_list:
bom_list.append(child_bom)
count += 1
bom_list.reverse()
return bom_list
def calculate_cost(self):
"""Calculate bom totals"""
self.calculate_op_cost()
self.calculate_rm_cost()
self.total_cost = self.operating_cost + self.raw_material_cost
def calculate_op_cost(self):
"""Update workstation rate and calculates totals"""
self.operating_cost = 0
for d in self.get('operations'):
if d.workstation:
if not d.hour_rate:
d.hour_rate = flt(frappe.db.get_value("Workstation", d.workstation, "hour_rate"))
if d.hour_rate and d.time_in_mins:
d.operating_cost = flt(d.hour_rate) * flt(d.time_in_mins) / 60.0
self.operating_cost += flt(d.operating_cost)
def calculate_rm_cost(self):
"""Fetch RM rate as per today's valuation rate and calculate totals"""
total_rm_cost = 0
for d in self.get('items'):
if d.bom_no:
d.rate = self.get_bom_unitcost(d.bom_no)
d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.qty, self.precision("qty", d))
d.qty_consumed_per_unit = flt(d.qty, self.precision("qty", d)) / flt(self.quantity, self.precision("quantity"))
total_rm_cost += d.amount
self.raw_material_cost = total_rm_cost
def update_exploded_items(self):
""" Update Flat BOM, following will be correct data"""
self.get_exploded_items()
self.add_exploded_items()
def get_exploded_items(self):
""" Get all raw materials including items from child bom"""
self.cur_exploded_items = {}
for d in self.get('items'):
if d.bom_no:
self.get_child_exploded_items(d.bom_no, d.qty)
else:
self.add_to_cur_exploded_items(frappe._dict({
'item_code' : d.item_code,
'item_name' : d.item_name,
'description' : d.description,
'image' : d.image,
'stock_uom' : d.stock_uom,
'qty' : flt(d.qty),
'rate' : flt(d.rate),
}))
def add_to_cur_exploded_items(self, args):
if self.cur_exploded_items.get(args.item_code):
self.cur_exploded_items[args.item_code]["qty"] += args.qty
else:
self.cur_exploded_items[args.item_code] = args
def get_child_exploded_items(self, bom_no, qty):
""" Add all items from Flat BOM of child BOM"""
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
child_fb_items = frappe.db.sql("""select bom_item.item_code, bom_item.item_name, bom_item.description,
bom_item.stock_uom, bom_item.qty, bom_item.rate,
bom_item.qty / ifnull(bom.quantity, 1) as qty_consumed_per_unit
from `tabBOM Explosion Item` bom_item, tabBOM bom
where bom_item.parent = bom.name and bom.name = %s and bom.docstatus = 1""", bom_no, as_dict = 1)
for d in child_fb_items:
self.add_to_cur_exploded_items(frappe._dict({
'item_code' : d['item_code'],
'item_name' : d['item_name'],
'description' : d['description'],
'stock_uom' : d['stock_uom'],
'qty' : d['qty_consumed_per_unit']*qty,
'rate' : flt(d['rate']),
}))
def add_exploded_items(self):
"Add items to Flat BOM table"
frappe.db.sql("""delete from `tabBOM Explosion Item` where parent=%s""", self.name)
self.set('exploded_items', [])
for d in sorted(self.cur_exploded_items, key=itemgetter(0)):
ch = self.append('exploded_items', {})
for i in self.cur_exploded_items[d].keys():
ch.set(i, self.cur_exploded_items[d][i])
ch.amount = flt(ch.qty) * flt(ch.rate)
ch.qty_consumed_per_unit = flt(ch.qty) / flt(self.quantity)
ch.docstatus = self.docstatus
ch.db_insert()
def validate_bom_links(self):
if not self.is_active:
act_pbom = frappe.db.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item
where bom_item.bom_no = %s and bom_item.docstatus = 1
and exists (select * from `tabBOM` where name = bom_item.parent
and docstatus = 1 and is_active = 1)""", self.name)
if act_pbom and act_pbom[0][0]:
frappe.throw(_("Cannot deactivate or cancel BOM as it is linked with other BOMs"))
def validate_operations(self):
if self.with_operations and not self.get('operations'):
frappe.throw(_("Operations cannot be left blank"))
if self.with_operations:
for d in self.operations:
if not d.description:
d.description = frappe.db.get_value('Operation', d.operation, 'description')
def get_bom_items_as_dict(bom, company, qty=1, fetch_exploded=1):
item_dict = {}
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
query = """select
bom_item.item_code,
item.item_name,
sum(bom_item.qty/ifnull(bom.quantity, 1)) * %(qty)s as qty,
item.description,
item.image,
item.stock_uom,
item.default_warehouse,
item.expense_account as expense_account,
item.buying_cost_center as cost_center
from
`tab{table}` bom_item, `tabBOM` bom, `tabItem` item
where
bom_item.parent = bom.name
and bom_item.docstatus < 2
and bom_item.parent = %(bom)s
and item.name = bom_item.item_code
and is_stock_item = 1
{conditions}
group by item_code, stock_uom"""
if fetch_exploded:
query = query.format(table="BOM Explosion Item",
conditions="""and item.is_sub_contracted_item = 0""")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
else:
query = query.format(table="BOM Item", conditions="")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
# make unique
for item in items:
if item_dict.has_key(item.item_code):
item_dict[item.item_code]["qty"] += flt(item.qty)
else:
item_dict[item.item_code] = item
for item, item_details in item_dict.items():
for d in [["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"], ["Warehouse", "default_warehouse", ""]]:
company_in_record = frappe.db.get_value(d[0], item_details.get(d[1]), "company")
if not item_details.get(d[1]) or (company_in_record and company != company_in_record):
item_dict[item][d[1]] = frappe.db.get_value("Company", company, d[2]) if d[2] else None
return item_dict
@frappe.whitelist()
def get_bom_items(bom, company, qty=1, fetch_exploded=1):
items = get_bom_items_as_dict(bom, company, qty, fetch_exploded).values()
items.sort(lambda a, b: a.item_code > b.item_code and 1 or -1)
return items
def validate_bom_no(item, bom_no):
"""Validate BOM No of sub-contracted items"""
bom = frappe.get_doc("FormisBOM", bom_no)
if not bom.is_active:
frappe.throw(_("BOM {0} must be active").format(bom_no))
if bom.docstatus != 1:
if not getattr(frappe.flags, "in_test", False):
frappe.throw(_("BOM {0} must be submitted").format(bom_no))
if item and not (bom.item.lower() == item.lower() or \
bom.item.lower() == cstr(frappe.db.get_value("Item", item, "variant_of")).lower()):
frappe.throw(_("BOM {0} does not belong to Item {1}").format(bom_no, item))
@frappe.whitelist()
def get_children(parent=None):
if parent:
return frappe.db.sql("""select item_code,
bom_no as value, qty,
if(ifnull(bom_no, "")!="", 1, 0) as expandable
from `tabBOM Item`
where parent=%s
order by idx
""", parent, as_dict=True)
| null |
formis/formis/doctype/formisbom/formisbom.py
|
formisbom.py
|
py
| 15,279 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "frappe.model.document.Document",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "frappe.db.sql_list",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.cint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "erpnext.utilities.transaction_base.validate_uom_is_integer",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "frappe.db.set",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.set",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.sql",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "frappe.throw",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "frappe.form_dict.get",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "frappe.form_dict",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "frappe.utils.cstr",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "frappe.msgprint",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.sql",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.flt",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.flt",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "frappe.model.utils.set_default",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "frappe.get_doc",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "frappe.db.set",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "frappe.get_doc",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "frappe.throw",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "frappe.utils.cstr",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.cstr",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "frappe.get_doc",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "frappe.utils.cstr",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.flt",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.flt",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "frappe._dict",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "frappe._dict",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "operator.itemgetter",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "frappe.utils.flt",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "frappe.throw",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.sql",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.sql",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 411,
"usage_type": "attribute"
},
{
"api_name": "frappe.utils.flt",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "frappe.db.get_value",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "frappe.whitelist",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "frappe.get_doc",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "frappe.throw",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "frappe.flags",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "frappe.throw",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "frappe.utils.cstr",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "frappe.throw",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "frappe._",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "frappe.db.sql",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "frappe.db",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "frappe.whitelist",
"line_number": 447,
"usage_type": "call"
}
] |
416261428
|
""" KNN - Telecommunications Dataset
KNN is a supervised learnig algorithm used for classification, based on the correpsonding points of a given data points. Once a point is to be predicted, it take into account the 'k' nearest points to determine its classification. """
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import itertools
from matplotlib.ticker import NullFormatter
import matplotlib.ticker as ticker
from sklearn import preprocessing
from IPython.display import display
df = pd.read_csv("teleCust1000t.csv")
#display(df.head())
#Contains segmented data for its customer base by service usage patterns, categorising them based on demographic data. The target field - custcat - has four possible values according to the four possible customer services. Objective is to build a classifier that predicts the class of unknown cases using a KNN algorithm.
#display(df['custcat'].value_counts())
# 281 plus service (3), 266 basic service (1), 236 total service (4), and 217 E service (2).
#Use sklearn to convert the pandas data frame to a numpy array, then standardise the data to get zero mean and unit variance - this is good practice.
X = df[['region', 'tenure','age', 'marital', 'address', 'income', 'ed', 'employ','retire', 'gender', 'reside']] .values #.astype(float)
X[0:5]
y = df['custcat'].values
y[0:5]
X = preprocessing.StandardScaler().fit(X).transform(X.astype(float))
X[0:5]
#Train Test Split - inc. out of sample accuracy
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)
#print ('Train set:', X_train.shape, y_train.shape)
#print ('Test set:', X_test.shape, y_test.shape)
from sklearn.neighbors import KNeighborsClassifier
k = 10
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train)
neigh
yhat = neigh.predict(X_test)
yhat[0:5]
#Accuracy Evaluation
from sklearn import metrics
#print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh.predict(X_train)))
#print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
#Accuracy of KNN for different Ks
Ks = 10
mean_acc = np.zeros((Ks-1))
std_acc = np.zeros((Ks-1))
ConfusionMx = []
for n in range(1,Ks):
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = n).fit(X_train,y_train)
yhat=neigh.predict(X_test)
mean_acc[n-1] = metrics.accuracy_score(y_test, yhat)
std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0])
mean_acc
plt.plot(range(1,Ks),mean_acc,'g')
plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 1xstd'))
plt.ylabel('Accuracy ')
plt.xlabel('Number of Neighbours (K)')
plt.tight_layout()
plt.show()
print("The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1)
| null |
KNN.py
|
KNN.py
|
py
| 2,847 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "numpy.std",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
}
] |
133539001
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class FbPyTestcase2(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "https://www.facebook.com/"
self.verificationErrors = []
self.accept_next_alert = True
def test_fb_py_testcase2(self):
driver = self.driver
driver.get(self.base_url + "/saved/?cref=28&collection_token=100001097280457%3A586254444758776%3A102")
# ERROR: Caught exception [ERROR: Unsupported command [selectWindow | name=_e_01Xz | ]]
driver.find_element_by_id("email").clear()
time.sleep(1)
driver.find_element_by_id("email").send_keys("0755491753")
time.sleep(1)
driver.find_element_by_id("pass").clear()
time.sleep(1)
driver.find_element_by_id("pass").send_keys("PASSWORD GOES HERE")
time.sleep(1)
driver.find_element_by_id("loginbutton").click()
time.sleep(1)
driver.get(self.base_url + "/saved/?cref=28&collection_token=100001097280457%3A586254444758776%3A102")
driver.find_element_by_xpath("//a[contains(text(),'...')]").click()
driver.find_element_by_xpath("//span[contains(text(),'Unsave')]").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.close()
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
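# Hedged modernization note (editor's addition): Selenium 4 removed the
# find_element_by_* helpers and switch_to_alert(); on current versions the
# equivalent calls would look like:
#   driver.find_element(By.ID, "email")
#   self.driver.switch_to.alert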
| null |
facebook archived saves removal.py
|
facebook archived saves removal.py
|
py
| 2,447 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoAlertPresentException",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 63,
"usage_type": "call"
}
] |
270410244
|
"""
fotochest.apps.photo_manager.urls
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
from django.conf.urls import patterns, url
from fotochest.apps.photo_manager import views, feeds
urlpatterns = patterns('',
# Public URLS
url(r'^$', views.HomepageListView.as_view(), name="homepage"),
url(r'^photos/(?P<album_slug>[-\w]+)/(?P<photo_slug>[-\w]+)/$', views.PhotoDetailView.as_view(), name="regular_photo_url"),
url(r'^photos/(?P<album_slug>[-\w]+)/(?P<photo_slug>[-\w]+)/fullscreen/$', views.PhotoFullScreen.as_view(), name="photo_fullscreen"),
# ShortURL
url(r'^f/(?P<photo_id>\d+)/$', views.PhotoDetailView.as_view(), name="short_photo_url"),
#download
url(r'^photos/download/(?P<photo_id>\d+)/$', views.photo_download, name="photo_download"),
# Map - This is not ideal. Should we have a maps.urls?
url(r'map/$', views.LocationsListView.as_view(), name="map"),
url(r'map/(?P<location_slug>[-\w]+)/$', views.PhotoLocationsListView.as_view(), name="photo_location"),
# Feeds
url(r'^feed/$', feeds.StreamFeed(), name="homepage_feed"),
url(r'^album/(?P<album_slug>[-\w]+)/feed/$', feeds.AlbumStream(), name="album_stream"),
# Albums
url(r'^albums/$', views.AlbumListView.as_view(), name="albums"),
url(r'^album/(?P<album_slug>[-\w]+)/$', views.AlbumDetailView.as_view(), name='album_detail'),
)
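# Hedged compatibility note (editor's addition): django.conf.urls.patterns()
# was deprecated in Django 1.8 and removed in 1.10; on newer Django the same
# routes would be declared as a plain list, e.g.:
#   urlpatterns = [
#       url(r'^$', views.HomepageListView.as_view(), name="homepage"),
#       ...
#   ]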
| null |
fotochest/apps/photo_manager/urls.py
|
urls.py
|
py
| 1,429 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.HomepageListView.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.HomepageListView",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoDetailView.as_view",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoDetailView",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoFullScreen.as_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoFullScreen",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoDetailView.as_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoDetailView",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.photo_download",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.LocationsListView.as_view",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.LocationsListView",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoLocationsListView.as_view",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.PhotoLocationsListView",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.feeds.StreamFeed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.feeds",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.feeds.AlbumStream",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.feeds",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.AlbumListView.as_view",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.AlbumListView",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.AlbumDetailView.as_view",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "fotochest.apps.photo_manager.views.AlbumDetailView",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "fotochest.apps.photo_manager.views",
"line_number": 36,
"usage_type": "name"
}
] |
376556241
|
from django.db import models
from django.conf import settings
from carro.models import Carro
from django.contrib.auth.models import User
from cliente.models import Direccion
# Create your models here.
class Orden(models.Model):
ESTADO = (
('1','Esperando Pago'),
('2','Cancelado'),
('3','Enviado'),
('4','Pago Aceptado'),
('5','Error de pago'),
('6','Devuelto'),
)
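# (Spanish status labels in English: Esperando Pago = Awaiting Payment,
# Cancelado = Cancelled, Enviado = Shipped, Pago Aceptado = Payment Accepted,
# Error de pago = Payment Error, Devuelto = Returned)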
numero = models.CharField(max_length=128,db_index=True)
carro = models.ForeignKey(Carro,null=True,blank=True)
usuario = models.ForeignKey(User,blank=True,null=True)
moneda = models.CharField(max_length=120,default=settings.CURRENCY_DEFAULT)
total = models.DecimalField(decimal_places=2, max_digits=12)
gasto_envio = models.DecimalField(decimal_places=2,max_digits=12)
direccion_envio = models.ForeignKey(Direccion,blank=True,null=True)
metodo_envio = models.CharField(max_length=100,blank=True,null=True)
fecha_compra = models.DateTimeField(auto_now_add=True, db_index=True)
estado = models.CharField(max_length=100,choices=ESTADO)
class Meta:
verbose_name = 'Orden'
verbose_name_plural = 'Ordenes'
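# Hedged compatibility note (editor's addition): from Django 2.0 onward every
# ForeignKey needs an explicit on_delete argument, e.g.
# models.ForeignKey(Carro, null=True, blank=True, on_delete=models.SET_NULL).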
| null |
orden/models.py
|
models.py
|
py
| 1,098 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "carro.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "carro.models.Carro",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.CURRENCY_DEFAULT",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cliente.models.Direccion",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
}
] |
418131302
|
import boto3
import time
import json
import logging
from typing import Dict, Optional
from botocore.exceptions import ClientError
from config import CONFIG
client = boto3.client("sqs", region_name="ap-northeast-2")
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s : %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
class SQS:
@classmethod
def send_message(cls, sqs_url: str, message_body: Dict) -> int:
response = client.send_message(QueueUrl=sqs_url, MessageBody=json.dumps(message_body))
# {'MD5OfMessageBody': 'f4c60d9169cf09c73341853cf04c9392',
# 'MessageId': '74fd9bec-7942-46ed-9a55-c06d1dac5288',
# 'ResponseMetadata': {'RequestId': 'e78a8b3e-5fef-5439-a9b6-d3962df5b3a1',
# 'HTTPStatusCode': 200,
# 'HTTPHeaders': {'x-amzn-requestid': 'e78a8b3e-5fef-5439-a9b6-d3962df5b3a1',
# 'date': 'Thu, 18 Feb 2021 13:47:06 GMT',
# 'content-type': 'text/xml',
# 'content-length': '378'},
# 'RetryAttempts': 0}}
return response["ResponseMetadata"]["HTTPStatusCode"]
@classmethod
def receive_message(cls, sqs_url: str, WaitTimeSeconds=20) -> Optional[Dict]:
response = client.receive_message(QueueUrl=sqs_url, MaxNumberOfMessages=1, WaitTimeSeconds=WaitTimeSeconds)
# no message
# {'ResponseMetadata': {'RequestId': 'b3ae256f-45f4-5c03-8c32-7dc733716d9e',
# 'HTTPStatusCode': 200,
# 'HTTPHeaders': {'x-amzn-requestid': 'b3ae256f-45f4-5c03-8c32-7dc733716d9e',
# 'date': 'Thu, 18 Feb 2021 14:06:07 GMT',
# 'content-type': 'text/xml',
# 'content-length': '240'},
# 'RetryAttempts': 0}}
# message exists
# {'Messages': [{'MessageId': '74fd9bec-7942-46ed-9a55-c06d1dac5288',
# 'ReceiptHandle': 'AQEBb9S/N/NCEhYtmJOh2JM7OM1+A2aSqrWz460AuCgW4e2hzTYqFO5TBCqKM1E6MOgyCS6cwKcOWMC1lvW95GBlUTlmDKGNwyszNgOdxUI7qZWAQrN4zsi0rnywQo4/SAPFezIVdnzH3T8Uyc9fSo+5M0Z5FS5Oeiput5DPy/6r2nrm8Joh0CxW393TiKZogI1uPtNcRz9HDLt1i1Kokk3DgnqlbMZnXlEFoV31tJr3zi8jbhUOHshQbRGphafOGzeUipm3HYXvgOXUl/21hR99Wo2Rw5UZTpGFrf0G9VeIb7LHaXCtaEJY1/mq2d5zj4+6sPh/9JYOK1hM0mEl1E4rmnhkzVKcUX7EWWt93b4mDMLsZuOixtW4eSlfbaLFtHZNffu1b7vFf75sV5A+TfbZ2A==',
# 'MD5OfBody': 'f4c60d9169cf09c73341853cf04c9392',
# 'Body': '{"request_id": 1, "create_date_time": "2021-02-18 22:33:48"}'}],
# 'ResponseMetadata': {'RequestId': '87ffb197-19dc-5757-89de-e93f6bc120cf',
# 'HTTPStatusCode': 200,
# 'HTTPHeaders': {'x-amzn-requestid': '87ffb197-19dc-5757-89de-e93f6bc120cf',
#
# 'date': 'Thu, 18 Feb 2021 13:47:09 GMT',
# 'content-type': 'text/xml',
# 'content-length': '941'},
# 'RetryAttempts': 0}}
if "Messages" in response:
sqs_message = response["Messages"][0]
message_body = json.loads(sqs_message["Body"])
logger.info(f"message received: {sqs_message}")
return {"ReceiptHandle": sqs_message["ReceiptHandle"], "Body": message_body}
else:
logger.error(f"No message to receive.")
return None
@classmethod
def delete_message(cls, sqs_url, ReceiptHandle):
response = client.delete_message(QueueUrl=sqs_url, ReceiptHandle=ReceiptHandle)
# {'ResponseMetadata': {'RequestId': '35f7be1d-5bd8-5784-8869-e6b52db3bbc1',
# 'HTTPStatusCode': 200,
# 'HTTPHeaders': {'x-amzn-requestid': '35f7be1d-5bd8-5784-8869-e6b52db3bbc1',
# 'date': 'Thu, 18 Feb 2021 14:05:31 GMT',
# 'content-type': 'text/xml',
# 'content-length': '215'},
# 'RetryAttempts': 0}}
logger.info(f"message {ReceiptHandle} deleted")
| null |
model_nlp/src/utils/sqs.py
|
sqs.py
|
py
| 4,390 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.client",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 33,
"usage_type": "name"
}
] |
72917107
|
import collections
from cffi import FFI
from enum import IntEnum
import os
import sys
__all__ = ['Options', 'Info', 'Entry', 'FTS']
def _cmp(x, y):
if x < y:
return -1
elif y < x:
return 1
else:
return 0
def key_to_cmp(key, reverse=False):
if key:
if reverse:
def cmp(x, y):
return _cmp(key(y), key(x))
else:
def cmp(x, y):
return _cmp(key(x), key(y))
else:
if reverse:
def cmp(x, y):
return _cmp(y, x)
else:
def cmp(x, y):
return _cmp(x, y)
return cmp
class Options(IntEnum):
COMFOLLOW = 0x001 # follow command line symlinks
LOGICAL = 0x002 # logical walk
NOCHDIR = 0x004 # don't change directories
NOSTAT = 0x008 # don't get stat info
PHYSICAL = 0x010 # physical walk
SEEDOT = 0x020 # return dot and dot-dot
XDEV = 0x040 # don't cross devices
WHITEOUT = 0x080 # return whiteout information
COMFOLLOWDIR= 0x400 # (non-std) follow command line symlinks for directories only
OPTIONMASK = 0x4ff # valid user option mask
NAMEONLY = 0x100 # (private) child names only
STOP = 0x200
class SetOptions(IntEnum):
AGAIN = 1
FOLLOW = 2
NOINSTR = 3
SKIP = 4
class Info(IntEnum):
D = 1 # preorder directory
DC = 2 # directory that causes cycles
DEFAULT = 3 # none of the above
DNR = 4 # unreadable directory
DOT = 5 # dot or dot-dot
DP = 6 # postorder directory
ERR = 7 # error; errno is set
F = 8 # regular file
INIT = 9 # initialized only
NS = 10 # stat(2) failed
NSOK = 11 # no stat(2) requested
SL = 12 # symbolic link
SLNONE = 13 # symbolic link without target
W = 14 # whiteout object
def is_dir(self):
return self in (Info.D, Info.DC, Info.DNR, Info.DOT, Info.DP)
def is_file(self):
return self == Info.F
def is_symlink(self):
return self in (Info.SL, Info.SLNONE)
def is_error(self):
return self in (Info.DNR, Info.ERR, Info.NS)
def has_stat(self):
return self not in (Info.DNR, Info.ERR, Info.NS, Info.NSOK)
def get_sizes():
types = ('dev_t', 'mode_t', 'nlink_t', 'ino_t', 'uid_t', 'gid_t',
'off_t', 'blkcnt_t', 'blksize_t', 'time_t')
ffi = FFI()
ffi.cdef(''.join('#define SIZE_OF_{} ...\n'.format(t) for t in types))
lib = ffi.verify('#include <sys/types.h>\n' +
''.join('#define SIZE_OF_{} sizeof({})\n'.format(t, t)
for t in types))
return ''.join(
'typedef uint{}_t {};\n'.format(getattr(lib, 'SIZE_OF_'+t)*8, t)
for t in types)
ffi = FFI()
ffi.cdef(get_sizes() + """
typedef unsigned short u_short;
struct timespec {
time_t tv_sec;
long tv_nsec;
...;
};
/* Note that POSIX does not require the timespec fields, or even mention
them beyond saying "The timespec structure may be defined as described
in <time.h>." However, both BSD (including OS X) and linux define them
like this. */
struct stat {
dev_t st_dev; /* ID of device containing file */
mode_t st_mode; /* Mode of file (see below) */
nlink_t st_nlink; /* Number of hard links */
ino_t st_ino; /* File serial number */
uid_t st_uid; /* User ID of the file */
gid_t st_gid; /* Group ID of the file */
dev_t st_rdev; /* Device ID */
struct timespec st_atimespec; /* time of last access */
struct timespec st_mtimespec; /* time of last data modification */
struct timespec st_ctimespec; /* time of last status change */
struct timespec st_birthtimespec; /* time of file creation(birth) */
off_t st_size; /* file size, in bytes */
blkcnt_t st_blocks; /* blocks allocated for file */
blksize_t st_blksize; /* optimal blocksize for I/O */
};
typedef ... FTS;
typedef struct _ftsent {
u_short fts_info; /* flags for FTSENT structure */
char *fts_accpath; /* access path */
char *fts_path; /* root path */
u_short fts_pathlen; /* strlen(fts_path) */
char fts_name[]; /* file name */
u_short fts_namelen; /* strlen(fts_name) */
short fts_level; /* depth (-1 to N) */
int fts_errno; /* file errno */
long fts_number; /* local numeric value */
void *fts_pointer; /* local address value */
struct ftsent *fts_parent; /* parent directory */
struct ftsent *fts_link; /* next file structure */
struct ftsent *fts_cycle; /* cycle structure */
struct stat *fts_statp; /* stat(2) information */
...;
} FTSENT;
FTS *
fts_open(char * const *path_argv, int options,
int (*compar)(const FTSENT **, const FTSENT **));
FTSENT *
fts_read(FTS *ftsp);
FTSENT *
fts_children(FTS *ftsp, int options);
int
fts_set(FTS *ftsp, FTSENT *f, int options);
int
fts_close(FTS *ftsp);
""")
libc = ffi.verify("""
#include <sys/types.h>
#include <sys/stat.h>
#include <fts.h>
""")
def _make_error(*args, code=None):
if code is None:
code = ffi.errno
return OSError(code, os.strerror(code), *args)
def _make_path(ent):
if isinstance(ent, Entry):
ent = ent.ftsent
if ent.fts_path:
return ffi.string(ent.fts_path).decode(sys.getfilesystemencoding())
else:
return ffi.string(ent.fts_name).decode(sys.getfilesystemencoding())
class Entry:
__slots__ = ('ftsent', '_nostat', '_stat')
def __init__(self, ftsent, nostat=False):
self.ftsent = ftsent
self._nostat = nostat
self._stat = None
@property
def info(self):
return Info(self.ftsent.fts_info)
@staticmethod
def _stringify(s):
return ffi.string(s).decode(sys.getfilesystemencoding()) if s else '<NULL>'
@property
def accpath(self):
return self._stringify(self.ftsent.fts_accpath)
@property
def path(self):
return self._stringify(self.ftsent.fts_path)
@property
def name(self):
return self._stringify(self.ftsent.fts_name)
@property
def level(self):
return self.ftsent.fts_level
@property
def errno(self):
return self.ftsent.fts_errno
@property
def stat(self):
if self._nostat:
return None
if self._stat is None:
if not self.info.has_stat():
self._nostat = True
return None
def dts(ts):
return ts.tv_sec + ts.tv_nsec / 1000000000
def dtsns(ts):
return ts.tv_sec * 1000000000 + ts.tv_nsec
# Note: This works in CPython 3.4 on platforms that have all of
# the relevant fields (which includes BSD/OS X and Linux).
statp = self.ftsent.fts_statp
self._stat = os.stat_result((statp.st_mode, statp.st_ino, statp.st_dev,
statp.st_nlink, statp.st_uid, statp.st_gid,
statp.st_size,
dts(statp.st_atimespec),
dts(statp.st_mtimespec),
dts(statp.st_ctimespec),
int(dts(statp.st_atimespec)),
int(dts(statp.st_mtimespec)),
int(dts(statp.st_ctimespec)),
dtsns(statp.st_atimespec),
dtsns(statp.st_mtimespec),
dtsns(statp.st_ctimespec),
statp.st_blksize, statp.st_blocks,
0, 0, 0, # rdev, flags, gen
dts(statp.st_birthtimespec)))
return self._stat
class FTS:
def __init__(self, *paths, options=None, key=None, reverse=False):
"""FTS(path, options=None, key=None, reverse=False)
Begins an FTS traversal on the specified paths, iterating the
filesystem tree in depth-first order, yielding Entry structures.
Note that by default, unlike os.walk, directories are yielded twice,
both before and after their contents. (See Info for details.)
The paths can be either strings (in which case they're assumed to
be in the default file system encoding) or bytes.
The options are a bitmask of Options values, defaulting to
PHYSICAL if nothing is passed.
        If a key function is given, it is called on each Entry to produce
        a sort key (pairs of keys are then compared); otherwise, the
        traversal order is the order of the paths, and the native directory
        entry order within each path.
Note that accpath and path cannot be used (but name can), and
stat can only be used if stat was called (info is not NS or NSOK).
The key objects only need to define <, and it is legal for two
objects to be incomparable (that is, not x<y and not y<x).
See your platform's fts(3) documentation for more details."""
self._fts = None
enc = sys.getfilesystemencoding()
def encpath(path):
try:
return path.encode(enc)
except AttributeError:
return path
def decpath(path):
try:
return path.decode(enc)
except AttributeError:
return path
path_cstrs = [ffi.new("char[]", encpath(path)) for path in paths]
path_cstrs.append(ffi.NULL)
path_argv = ffi.new("char *[]", path_cstrs)
self.paths = [decpath(path) for path in paths]
self.options = options
if options is None:
options = 0
        if not options & (Options.PHYSICAL | Options.LOGICAL):  # neither set: default to PHYSICAL
options |= Options.PHYSICAL
if key:
compar = ffi.callback("int(FTSENT **, FTSENT**)",
key_to_cmp(key, reverse))
else:
compar = ffi.NULL
self._fts = libc.fts_open(path_argv, options, compar)
if not self._fts:
raise _make_error(':'.join(self.paths))
def close(self):
if self._fts:
libc.fts_close(self._fts)
self._fts = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def __iter__(self):
return self
def __next__(self):
ent = libc.fts_read(self._fts)
if not ent:
if ffi.errno:
raise _make_error(':'.join(self.paths))
else:
raise StopIteration
return Entry(ent, self.options & Options.NOSTAT)
#def children(self):
# ent = children(self._fts), walk the links and return a sequence
def set(self, entry, option):
if isinstance(entry, Entry):
entry = entry.ftsent
if libc.fts_set(self._fts, entry, option):
raise _make_error(_make_path(entry))
def again(self, ent):
self.set(ent, SetOptions.AGAIN)
def follow(self, ent):
self.set(ent, SetOptions.FOLLOW)
def skip(self, ent):
self.set(ent, SetOptions.SKIP)
if __name__ == '__main__':
    args = sys.argv[1:] if len(sys.argv) > 1 else ['.']
with FTS(*args, options=Options.COMFOLLOW | Options.NOSTAT) as f:
for e in f:
#if e.info==Info.D and e.name.startswith('_'):
# f.skip(e)
#elif e.info==Info.SL:
# f.follow(e)
if e.info.is_error():
path = _make_error(_make_path(e), code=e.errno)
else:
path = e.path
print(e.info.name.ljust(7), e.level, path)
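    # Hedged usage sketch (not from the original): the `key` argument follows
    # Python's usual key-function convention via key_to_cmp, so sorting
    # siblings case-insensitively by name would look like:
    #
    #     with FTS('.', key=lambda ent: ent.name.lower()) as walk:
    #         for ent in walk:
    #             print(ent.path)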
| null |
fts.py
|
fts.py
|
py
| 12,549 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "enum.IntEnum",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "enum.IntEnum",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "enum.IntEnum",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "cffi.FFI",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cffi.FFI",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.strerror",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "sys.getfilesystemencoding",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "sys.getfilesystemencoding",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "sys.getfilesystemencoding",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.stat_result",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "sys.getfilesystemencoding",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 327,
"usage_type": "attribute"
}
] |
449547060
|
import pandas as pd
import numpy as np
import copy
from ann2 import Net
from replicate import replicate_data
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from train import train
import csv
# Load training data as pd dataframe and convert pd dataframe into numpy array.
training_data = pd.read_excel('Data3/reduced_training_data.xlsx')
training_data_array = np.array(training_data)
# Standardise Training Data
scaler_train = StandardScaler()
scaler_train.fit(training_data)
# Split data into k=6 folds.
kf = KFold(n_splits=6)
kf.get_n_splits(training_data)
# Split training data set into 6 subsets containing k-1 folds before optimisation.
class wrapper(object):
def __init__(self):
self.value = []
subset_train1 = wrapper()
subset_train2 = wrapper()
subset_train3 = wrapper()
subset_train4 = wrapper()
subset_train5 = wrapper()
subset_train6 = wrapper()
subset_test1 = wrapper()
subset_test2 = wrapper()
subset_test3 = wrapper()
subset_test4 = wrapper()
subset_test5 = wrapper()
subset_test6 = wrapper()
subset_train_list = [subset_train1, subset_train2, subset_train3, subset_train4, subset_train5, subset_train6]
subset_test_list = [subset_test1, subset_test2, subset_test3, subset_test4, subset_test5, subset_test6]
index = 0
for train_index, test_index in kf.split(training_data):
for row in train_index:
subset_train_list[index].value.append(training_data_array[row])
for row in test_index:
subset_test_list[index].value.append(training_data_array[row])
index +=1
# Standardise Test Data
for subset in subset_test_list:
subset.value = scaler_train.transform(subset.value)
# Replicate and Standardise the training data in each subset.
columns = "BC NC LP LI NIC".split()
for index, subset in enumerate(subset_train_list):
df = pd.DataFrame(data=subset.value, index=None, columns=columns)
ref = df
df = scaler_train.transform(df)
replicated_data1 = replicate_data(ref, 50, 0.03)
replicated_data1 = scaler_train.transform(replicated_data1)
df = np.append(df, replicated_data1, axis=0)
replicated_data2 = replicate_data(ref, 50, 0.05)
replicated_data2 = scaler_train.transform(replicated_data2)
df = np.append(df, replicated_data2, axis=0)
subset.value = df
# Calculate training and test labels
for index1, subset in enumerate(subset_train_list):
a = []
try:
for index2, row in enumerate(subset.value):
dBC = subset.value[index2 + 1][0] - row[0]
dNC = subset.value[index2 + 1][1] - row[1]
dLP = subset.value[index2 + 1][2] - row[2]
rates =[dBC, dNC, dLP]
a.append(rates)
except IndexError:
rates = [0, 0, 0]
a.append(rates)
a = np.array(a)
subset.value = np.append(subset.value, a, axis=1)
for index1, subset in enumerate(subset_test_list):
b = []
try:
for index2, row in enumerate(subset.value):
dBC = subset.value[index2 + 1][0] - row[0]
dNC = subset.value[index2 + 1][1] - row[1]
dLP = subset.value[index2 + 1][2] - row[2]
rates =[dBC, dNC, dLP]
b.append(rates)
except IndexError:
rates = [0, 0, 0]
b.append(rates)
b = np.array(b)
subset.value = np.append(subset.value, b, axis=1)
# Remove all datapoints corresponding to 144 h from the training and testing sets
for subset in subset_train_list:
count = 0
decrement = 0
for index, row in enumerate(subset.value):
count +=1
if count == 13:
delete = index - decrement
subset.value = np.delete(subset.value, delete, 0)
decrement += 1
count = 0
for subset in subset_test_list:
subset.value = np.delete(subset.value, -1, 0)
subset_train_list = np.array(subset_train_list)
subset_test_list = np.array(subset_test_list)
# Shuffle Training Data
for subset in subset_train_list:
np.random.shuffle(subset.value)
# k-fold cross validation training loop
HL = 2
HN = [(2, 2), (4, 4)]
EPOCHS = 3000
BATCH_SIZE = 50
LR = 0.001
MODELS = {}
for h1, h2 in HN:
net = Net(h1, h2)
init_state = copy.deepcopy(net.state_dict())
MSEs = []
for index, subset in enumerate(subset_train_list):
subset.value = np.array(subset.value)
subset_test_list[index].value = np.array(subset_test_list[index].value)
net.load_state_dict(init_state)
training_inputs = subset.value[:, 0:5]
training_labels = subset.value[:, 5:]
test_inputs = subset_test_list[index].value[:, 0:5]
test_labels = subset_test_list[index].value[:, 5:]
E_opt, opt_epochs = train(net, training_inputs, training_labels, test_inputs, test_labels, EPOCHS, LR, BATCH_SIZE)
MODELS['F{a}-{b}_{x}-{y}_{z}'.format(a=index+1, b=HL, x=h1, y=h2, z=opt_epochs)] = E_opt
with open('Data3/Search/k_fold_results_{x}HL_hn-e.csv'.format(x=HL), 'w') as f:
for key in MODELS.keys():
f.write("%s: %s\n"%(key, MODELS[key]))
print(MODELS)
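# Minimal follow-up sketch (not part of the original run): pick the fold/width
# configuration with the lowest optimal test error, assuming each E_opt stored
# in MODELS is a scalar MSE as returned by train().
best_key = min(MODELS, key=MODELS.get)
print('best configuration:', best_key, 'MSE:', MODELS[best_key])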
| null |
ANN_Stopping_kfold/2HL_k_fold_cross_validation_hn-e.py
|
2HL_k_fold_cross_validation_hn-e.py
|
py
| 5,098 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_excel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "replicate.replicate_data",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "replicate.replicate_data",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "ann2.Net",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "train.train",
"line_number": 161,
"usage_type": "call"
}
] |
592019435
|
from lib.clients import Clients
class Repository(Clients):
def __init__(self, slug, sticky_branch):
super().__init__()
self.slug = slug
self.sticky_branch = sticky_branch
self.slug_encoded = slug.replace('/', '%2F')
self.travis_url = self._Clients__travis_client.get_repo_url(slug)
self.github_url = self._Clients__github_client.get_repo_url(slug)
def info(self):
info = {
"slug" : self.slug,
"owner" : self.slug.split('/')[0],
"name" : self.slug.split('/')[1],
"url" : self.travis_url
}
return info
def _last_build_on_default_branch(self, **params):
params['include'] = 'repository.default_branch,branch.last_build,build.commit'
repo = self._Clients__travis_client.repo(self.slug_encoded, **params).json()
default_branch = repo['default_branch']
last_build = default_branch['last_build']
if last_build:
return self.__buildParser(last_build)
return []
def _last_build_on_sticky_branch(self, **params):
params["branch.name"] = self.sticky_branch
params["sort_by"] = "id:desc"
builds = self._Clients__travis_client.builds(self.slug_encoded, **params).json()['builds']
if len(builds) > 0:
return self.__buildParser(builds[0])
        return []
def last_build(self, default_branch=False, **params):
if default_branch:
return self._last_build_on_default_branch(**params)
if self.sticky_branch and len(self.sticky_branch):
return self._last_build_on_sticky_branch(**params)
params['limit'] = 1
builds = self._Clients__travis_client.builds(self.slug_encoded, **params).json()['builds']
if not builds:
return []
return self.__buildParser(builds[0])
def branches(self):
try:
response = self._Clients__github_client.branches(self.slug)
branches = [branch['name'] for branch in response.json()]
        except Exception:
branches = []
return branches
def env_vars(self):
env_vars = self._Clients__travis_client.env_vars(self.slug_encoded).json()['env_vars']
return env_vars
def trigger_build(self, branch):
data = {"request": {"branch": branch}}
self._Clients__travis_client.trigger_build(self.slug_encoded, data)
def restart_build(self, buildid):
self._Clients__travis_client.restart_build(buildid)
def cancel_build(self, buildid):
self._Clients__travis_client.cancel_build(buildid)
def __buildParser(self, build):
commit_url = self.github_url + "/commit/{sha}"
build_url = self.travis_url + "/builds/{id}"
last_build = {
"id": build['id'],
"number": build['number'],
"state": build['state'].upper(),
"url": build_url.format(id=build['id']),
"branch": build['branch']['@href'].split('/')[-1],
"commit_sha": build['commit']['sha'][:7],
"commit_author": build['commit']['author']['name'],
"commit_url": commit_url.format(sha=build['commit']['sha'])
}
if build['event_type'] == 'pull_request':
last_build['event'] = 'Pull Request #{}'.format(build['pull_request_number'])
last_build['event_title'] = build['pull_request_title']
else:
last_build['event'] = build['event_type'].title()
last_build['event_title'] = build['commit']['message']
if build['state'] == 'started':
time_event = self._Clients__tools.convert_time_to_age(build['started_at'])
time_event = 'Running for {}'.format(time_event)
last_build['time_event'] = time_event
elif build['state'] != 'created':
time_event = self._Clients__tools.convert_time_to_age(build['finished_at'])
time_event = 'Finished {}'.format(time_event)
last_build['time_event'] = time_event
return last_build
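# Usage sketch (hypothetical slug and branch; assumes Clients wires up
# authenticated Travis and GitHub API clients):
#
#     repo = Repository('owner/project', sticky_branch='master')
#     print(repo.info())
#     print(repo.last_build())   # sticky branch first, else the latest build
#     repo.trigger_build('master')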
| null |
lib/repository.py
|
repository.py
|
py
| 4,079 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "lib.clients.Clients",
"line_number": 4,
"usage_type": "name"
}
] |
325779110
|
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract durations based-on tacotron-2 alignments for FastSpeech."""
import tensorflow as tf
import sys
import argparse
import logging
import os
import numpy as np
import matplotlib.pyplot as plt  # needed by the --save-alignment branch in main()
import tensorflow_tts as tts
from tqdm import tqdm
from numba import jit
from tensorflow_tts.models import TFTacotron2
from tensorflow_tts.utils import return_strategy
from Processor import JSpeechProcessor
sys.path.append(".")
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
# return strategy
STRATEGY = return_strategy()
class Config(object):
def __init__(self,outdir,batch_size=8,vocab_size=149,n_speakers=1):
# tacotron2 params
self.vocab_size = vocab_size # default
self.embedding_hidden_size = 512 # 'embedding_hidden_size': 512
self.initializer_range = 0.02 # 'initializer_range': 0.02
self.n_speakers = n_speakers # 'n_speakers': 1
self.layer_norm_eps = 1e-6
self.embedding_dropout_prob = 0.1 # 'embedding_dropout_prob': 0.1
self.n_conv_encoder = 5 # 'n_conv_encoder': 5
self.encoder_conv_filters = 512 # 'encoder_conv_filters': 512
self.encoder_conv_kernel_sizes = 5 # 'encoder_conv_kernel_sizes': 5
self.encoder_conv_activation = 'relu' # 'encoder_conv_activation': 'relu'
self.encoder_conv_dropout_rate = 0.5 # 'encoder_conv_dropout_rate': 0.5
self.encoder_lstm_units = 256 # 'encoder_lstm_units': 256
self.n_prenet_layers = 2 # 'n_prenet_layers': 2
self.prenet_units = 256 # 'prenet_units': 256
self.prenet_activation = 'relu' # 'prenet_activation': 'relu'
self.prenet_dropout_rate = 0.5 # 'prenet_dropout_rate': 0.5
self.decoder_lstm_units = 1024 # 'decoder_lstm_units': 1024
self.n_lstm_decoder = 1 # 'n_lstm_decoder': 1
self.attention_type = 'lsa' # 'attention_type': 'lsa'
self.attention_dim = 128 # 'attention_dim': 128
self.attention_filters = 32 # 'attention_filters': 32
self.attention_kernel = 31 # 'attention_kernel': 31
self.n_mels = 20 # 'n_mels': 80
self.reduction_factor = 1 # 'reduction_factor': 1
self.n_conv_postnet = 5 # 'n_conv_postnet': 5
self.postnet_conv_filters = 512 # 'postnet_conv_filters': 512
self.postnet_conv_kernel_sizes = 5 # 'postnet_conv_kernel_sizes': 5
self.postnet_dropout_rate = 0.1 # 'postnet_dropout_rate': 0.1
# data
self.batch_size = batch_size
self.test_size = 0.05
self.mel_length_threshold = 0
self.guided_attention = 0.2
# optimizer
self.initial_learning_rate = 0.001
self.end_learning_rate = 0.00001
self.decay_steps = 150000
self.warmup_proportion = 0.02
self.weight_decay= 0.001
# interval
self.train_max_steps = 200000
self.save_interval_steps = 2000
self.eval_interval_steps = 500
self.log_interval_steps = 200
self.start_schedule_teacher_forcing = 200001
self.start_ratio_value = 0.5
self.schedule_decay_steps = 50000
self.end_ratio_value = 0.0
self.num_save_intermediate_results = 1
self.outdir = outdir
self.items = {
"outdir": outdir,
"batch_size": self.batch_size,
"train_max_steps": self.train_max_steps,
"log_interval_steps": self.log_interval_steps,
"eval_interval_steps": self.eval_interval_steps,
"save_interval_steps": self.save_interval_steps,
"num_save_intermediate_results": self.num_save_intermediate_results
}
def __getitem__(self, key):
return self.items[key]
def generate_datasets(items, config, max_seq_length, max_mel_length):
def _guided_attention(char_len, mel_len, max_char_len, max_mel_len, g=0.2):
"""Guided attention. Refer to page 3 on the paper."""
max_char_seq = np.arange(max_char_len)
max_char_seq = tf.expand_dims(max_char_seq, 0) # [1, t_seq]
# [mel_seq, max_t_seq]
max_char_seq = tf.tile(max_char_seq, [max_mel_len, 1])
max_mel_seq = np.arange(max_mel_len)
max_mel_seq = tf.expand_dims(max_mel_seq, 1) # [mel_seq, 1]
# [mel_seq, max_t_seq]
max_mel_seq = tf.tile(max_mel_seq, [1, max_char_len])
right = tf.cast(max_mel_seq, tf.float32) / tf.constant(mel_len, dtype=tf.float32)
left = tf.cast(max_char_seq, tf.float32) / tf.constant(char_len, dtype=tf.float32)
ga_ = 1.0 - tf.math.exp(-((right - left) ** 2) / (2 * g * g))
return tf.transpose(ga_[:mel_len, :char_len], (1, 0))
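    # In closed form the mask above is W[n, t] = 1 - exp(-((t/T - n/N)^2) / (2g^2)),
    # with N = char_len, T = mel_len and g = config.guided_attention: it penalises
    # attention that strays far from the diagonal.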
def _generator():
for item in items:
tid, seq, feat_path, _ = item
with open(feat_path, 'rb') as f:
mel = np.fromfile(f, dtype='float32')
                mel = mel.reshape(-1, config.n_mels)  # reshape: np.resize does not accept -1
seq_length = seq.shape[0]
mel_length = mel.shape[0]
if f is None or mel_length < config.mel_length_threshold:
continue
# create guided attention (default).
g_attention = _guided_attention(
seq_length,
mel_length,
max_seq_length,
max_mel_length,
config.guided_attention
)
data = {
"utt_ids": tid,
"input_ids": seq,
"input_lengths": seq_length,
"speaker_ids": 0,
"mel_gts": mel,
"mel_lengths": mel_length,
"g_attentions": g_attention
}
yield data
output_types = {
"utt_ids": tf.string,
"input_ids": tf.int32,
"input_lengths": tf.int32,
"speaker_ids": tf.int32,
"mel_gts": tf.float32,
"mel_lengths": tf.int32,
"g_attentions": tf.float32
}
datasets = tf.data.Dataset.from_generator(_generator, output_types=output_types)
padding_values = {
"utt_ids": " ",
"input_ids": 0,
"input_lengths": 0,
"speaker_ids": 0,
"mel_gts": 0.0,
"mel_lengths": 0,
"g_attentions": -1.0
}
padded_shapes = {
"utt_ids": [],
"input_ids": [None],
"input_lengths": [],
"speaker_ids": [],
"mel_gts": [None, config.n_mels],
"mel_lengths": [],
"g_attentions": [None, None]
}
datasets = datasets.padded_batch(
config.batch_size * STRATEGY.num_replicas_in_sync,
padded_shapes=padded_shapes,
padding_values=padding_values)
datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
return datasets
@jit(nopython=True)
def get_duration_from_alignment(alignment):
D = np.array([0 for _ in range(np.shape(alignment)[0])])
for i in range(np.shape(alignment)[1]):
max_index = list(alignment[:, i]).index(alignment[:, i].max())
D[max_index] = D[max_index] + 1
return D
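# Worked example (illustrative): if the per-frame argmax characters of a
# [3, 5] alignment are [0, 0, 1, 2, 2], get_duration_from_alignment returns
# D = [2, 1, 2]: each character is credited one unit per frame it wins.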
# python .\extract_duration.py --rootdir ./datasets/jsut/basic --outdir ./datasets/jsut/basic/durations --checkpoint model-211500.h5
def main():
"""Running extract tacotron-2 durations."""
parser = argparse.ArgumentParser(description="Extract durations from charactor with trained Tacotron-2 ")
parser.add_argument("--outdir", type=str, required=True, help="directory to save generated speech.")
parser.add_argument("--rootdir", type=str, required=True, help="dataset directory root")
parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file to be loaded.")
parser.add_argument("--verbose",type=int,default=1,help="logging level. higher is more logging. (default=1)")
parser.add_argument("--batch-size", default=8, type=int, help="batch size.")
parser.add_argument("--win-front", default=2, type=int, help="win-front.")
parser.add_argument("--win-back", default=2, type=int, help="win-front.")
parser.add_argument("--use-window-mask", default=1, type=int, help="toggle window masking.")
parser.add_argument("--save-alignment", default=0, type=int, help="save-alignment.")
args = parser.parse_args()
if args.checkpoint is not None and os.path.isdir(args.checkpoint):
args.checkpoint = tf.train.latest_checkpoint(args.checkpoint)
# set logger
log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
if args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,stream=sys.stdout,format=log_format)
elif args.verbose > 0:
logging.basicConfig(level=logging.INFO,stream=sys.stdout,format=log_format)
else:
logging.basicConfig(level=logging.WARN,stream=sys.stdout,format=log_format)
logging.warning("Skip DEBUG/INFO messages")
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# select processor
Processor = JSpeechProcessor
processor = Processor(args.rootdir) # for test
config = Config(args.outdir, args.batch_size, processor.vocab_size(),1)
max_seq_length = processor.max_seq_length()
max_mel_length = processor.max_feat_length() // config.n_mels
# generate datasets
dataset = generate_datasets(processor.items, config, max_seq_length, max_mel_length)
# define model.
tacotron2 = TFTacotron2(config=config, training=True, name="tacotron2")
#build
input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
input_lengths = np.array([9])
speaker_ids = np.array([0])
mel_outputs = np.random.normal(size=(1, 50, config.n_mels)).astype(np.float32)
mel_lengths = np.array([50])
tacotron2(input_ids,input_lengths,speaker_ids,mel_outputs,mel_lengths,10,training=True)
tacotron2.load_weights(args.checkpoint)
tacotron2.summary()
# apply tf.function for tacotron2.
tacotron2 = tf.function(tacotron2, experimental_relax_shapes=True)
for data in tqdm(dataset, desc="[Extract Duration]"):
utt_ids = data["utt_ids"]
input_lengths = data["input_lengths"]
mel_lengths = data["mel_lengths"]
utt_ids = utt_ids.numpy()
real_mel_lengths = mel_lengths
# tacotron2 inference.
_, _, _, alignment_historys = tacotron2(
**data,
use_window_mask=args.use_window_mask,
win_front=args.win_front,
win_back=args.win_back,
training=True,
)
# convert to numpy
alignment_historys = alignment_historys.numpy()
for i, alignment in enumerate(alignment_historys):
real_char_length = input_lengths[i].numpy()
real_mel_length = real_mel_lengths[i].numpy()
alignment_mel_length = int(np.ceil(real_mel_length))
alignment = alignment[:real_char_length, :alignment_mel_length]
d = get_duration_from_alignment(alignment) # [max_char_len]
assert (np.sum(d) >= real_mel_length), f"{d}, {np.sum(d)}, {alignment_mel_length}, {real_mel_length}"
if np.sum(d) > real_mel_length:
rest = np.sum(d) - real_mel_length
if d[-1] > rest:
d[-1] -= rest
elif d[0] > rest:
d[0] -= rest
else:
d[-1] -= rest // 2
d[0] -= rest - rest // 2
assert d[-1] > 0 and d[0] > 0, f"{d}, {np.sum(d)}, {real_mel_length}"
saved_name = utt_ids[i].decode("utf-8")
# check a length compatible
assert (len(d) == real_char_length), f"different between len_char and len_durations, {len(d)} and {real_char_length}"
assert (np.sum(d) == real_mel_length), f"different between sum_durations and len_mel, {np.sum(d)} and {real_mel_length}"
# save D to folder.
d.astype(np.int32).tofile(os.path.join(args.outdir, f"{saved_name}.dur"))
# save alignment to debug.
if args.save_alignment == 1:
figname = os.path.join(args.outdir, f"{saved_name}_alignment.png")
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title(f"Alignment of {saved_name}")
im = ax.imshow(alignment, aspect="auto", origin="lower", interpolation="none")
fig.colorbar(im, ax=ax)
xlabel = "Decoder timestep"
plt.xlabel(xlabel)
plt.ylabel("Encoder timestep")
plt.tight_layout()
plt.savefig(figname)
plt.close()
if __name__ == "__main__":
main()
| null |
bin/dump_duration.py
|
dump_duration.py
|
py
| 14,545 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.list_physical_devices",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.experimental.set_memory_growth",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "tensorflow_tts.utils.return_strategy",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.tile",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tensorflow.tile",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tensorflow.cast",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tensorflow.math.exp",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.transpose",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.resize",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.int32",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.int32",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.int32",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.int32",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.float32",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.Dataset.from_generator",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.latest_checkpoint",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "logging.WARN",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "logging.warning",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "Processor.JSpeechProcessor",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "tensorflow_tts.models.TFTacotron2",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "tensorflow.function",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 322,
"usage_type": "attribute"
}
] |
240954815
|
from __future__ import print_function
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.nn.functional as F
from data import base_transform, VID_CLASSES, VID_CLASSES_name, MOT_CLASSES
from ssd import build_ssd
from layers.modules import AttentionLoss
import os
import time
import argparse
import numpy as np
import cv2
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--model_name', default='ssd300',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--confidence_threshold', default=0.3, type=float,
help='Detection confidence threshold')
parser.add_argument('--nms_threshold', default=0.45, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=10, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
parser.add_argument('--dataset_name', default='seqVID2017', help='Which dataset')
parser.add_argument('--ssd_dim', default=300, type=int, help='ssd_dim 300 or 512')
parser.add_argument('--literation', default='2900000', type=str,help='File path to save results')
parser.add_argument('--model_dir', default='./weights/ssd300_VID2017', type=str,help='Path to save model')
parser.add_argument('--video_name', default='/home/sean/data/ILSVRC/Data/VID/snippets/val/ILSVRC2017_val_00131000.mp4', type=str,help='Path to video')
parser.add_argument('--tssd', default='ssd', type=str, help='ssd or tssd')
parser.add_argument('--gpu_id', default='1', type=str, help='gpu_id')
parser.add_argument('--attention', default=False, type=str2bool, help='attention')
parser.add_argument('--save_dir', default=None, type=str, help='save dir')
parser.add_argument('--refine', default=False, type=str2bool, help='dynamic set prior box through time')
parser.add_argument('--oa_ratio', nargs='+', type=float, default=[0.0,1.0], help='step_list for learning rate')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.calls += 1
if self.calls > 10:
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.average_time = self.total_time / (self.calls-10)
if average:
return self.average_time
else:
return self.diff
else:
return 0.
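# Note: tic()/toc() treats the first 10 calls as warm-up and excludes them from
# the running average, so one-off start-up costs (e.g. CUDA initialisation) do
# not skew the reported per-frame time.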
if args.cuda and torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if args.dataset_name in ['VID2017', 'seqVID2017']:
labelmap = VID_CLASSES
num_classes = len(VID_CLASSES) + 1
elif args.dataset_name in ['MOT17Det', 'seqMOT17Det' ]:
labelmap = MOT_CLASSES
num_classes = len(MOT_CLASSES) + 1
# print(num_classes)
else:
raise ValueError("dataset [%s] not recognized." % args.dataset_name)
def test_net(net, im, w, h, state=None, thresh=0.5, tim=None):
im_trans = base_transform(im, ssd_dim, mean)
x = Variable(torch.from_numpy(im_trans).unsqueeze(0).permute(0, 3, 1, 2), volatile=True)
if args.cuda:
x = x.cuda()
if args.tssd == 'ssd':
detections, att_map = net(x)
detections = detections.data
else:
tim.tic()
detections, state, att_map = net(x, state)
detections = detections.data
t_diff = tim.toc(average=True)
# print(np.around(t_diff, decimals=4))
out = list()
for j in range(1, detections.size(1)):
for k in range(detections.size(2)):
dets = detections[0, j, k, :]
# mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
# dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.dim() == 0:
continue
boxes = dets[1:]
x_min = int(boxes[0] * w)
x_max = int(boxes[2] * w)
y_min = int(boxes[1] * h)
y_max = int(boxes[3] * h)
score = dets[0]
if score > thresh:
out.append([x_min, y_min, x_max, y_max, j-1, score])
return tuple(out), state, att_map
def att_match(att_roi_tuple, pre_att_roi_tuple, pooling_size=30):
match_list = [None] * len(att_roi_tuple)
if not pre_att_roi_tuple:
return match_list
else:
        xycls_dis = np.zeros((len(att_roi_tuple), len(pre_att_roi_tuple)))  # shape must be a tuple
for num, obj in enumerate(att_roi_tuple):
obj[0] = [F.upsample(roi, (pooling_size,pooling_size), mode='bilinear') for roi in obj[0]]
obj_x_min, obj_y_min, obj_x_max, obj_y_max, obj_cls = obj[1:]
for pre_num, pre_obj in enumerate(pre_att_roi_tuple):
                if num == 0:  # upsample each previous ROI set once, on the first outer pass
                    pre_obj[0] = [F.upsample(preroi, (pooling_size,pooling_size)) for preroi in pre_obj[0]]
preobj_x_min, preobj_y_min, preobj_x_max, preobj_y_max, preobj_cls = pre_obj[1:]
xycls_dis[num, pre_num] = (obj_x_min - preobj_x_min) + \
(obj_y_min - preobj_y_min) + \
(obj_x_max - preobj_x_max) + \
(obj_y_max - preobj_y_max) + \
(1,0)[obj_cls==preobj_cls]
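        # Hypothetical completion (the original never consumes xycls_dis): one
        # plausible rule is nearest previous ROI per detection, e.g.
        #     match_list = [int(i) for i in np.argmin(xycls_dis, axis=1)]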
return match_list
if __name__ == '__main__':
mean = (104, 117, 123)
ssd_dim = args.ssd_dim
if args.model_dir in ['../weights/ssd300_VIDDET', '../weights/ssd300_VIDDET_186', '../weights/ssd300_VIDDET_512', '../weights/attssd300_VIDDET_512']:
trained_model = os.path.join(args.model_dir, 'ssd300_VIDDET_' + args.literation + '.pth')
else:
trained_model = os.path.join(args.model_dir,
args.model_name + '_' + 'seq' + args.dataset_name + '_' + args.literation + '.pth') \
if args.tssd in ['lstm', 'tblstm', 'gru'] else os.path.join(args.model_dir,
args.model_name + '_' + args.dataset_name + '_' + args.literation + '.pth')
print('loading model!')
net = build_ssd('test', ssd_dim, num_classes, tssd=args.tssd,
top_k = args.top_k,
thresh = args.confidence_threshold,
nms_thresh = args.nms_threshold,
attention=args.attention, #o_ratio=args.oa_ratio[0], a_ratio=args.oa_ratio[1],
refine=args.refine)
net.load_state_dict(torch.load(trained_model))
net.eval()
print('Finished loading model!', args.model_dir, args.literation)
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
tim = Timer()
frame_num = 0
cap = cv2.VideoCapture(args.video_name)
w, h = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print(w,h)
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
att_criterion = AttentionLoss((h,w))
state = [None]*6 if args.tssd in ['lstm', 'tblstm', 'gru'] else None
identity = 0
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
break
frame_draw = frame.copy()
frame_num += 1
objects, state, att_map = test_net(net, frame, w, h, state=state, thresh=args.confidence_threshold, tim=tim)
if args.attention:
_, up_attmap = att_criterion(att_map) # scale, batch, tensor(1,h,w)
att_target = up_attmap[0][0].cpu().data.numpy().transpose(1, 2, 0)
for object in objects:
color = (0,0,255)
x_min, y_min, x_max, y_max, cls, score = object
# tubelets[cls][identity] = [objects,]
## Draw
cv2.rectangle(frame_draw, (x_min, y_min), (x_max, y_max), color, thickness=2)
cv2.fillConvexPoly(frame_draw, np.array(
[[x_min-1, y_min], [x_min-1, y_min - 50], [x_max+1 , y_min - 50], [x_max+1, y_min]], np.int32),
color)
cv2.putText(frame_draw, VID_CLASSES_name[cls] + str(np.around(score, decimals=2)),
(x_min + 10, y_min - 10), cv2.FONT_HERSHEY_DUPLEX, 1.4, color=(255, 255, 255), thickness=2)
print(str(frame_num)+':'+str(np.around(score, decimals=2))+',')
            if args.attention:
                cv2.imshow('att', att_target)
# cv2.imshow('mask', att_target)
# else:
# print(frame_num)
cv2.imshow('frame', frame_draw)
ch = cv2.waitKey(1)
if ch == 32:
# if frame_num in [11, 23, 44, 60, 76, 89]:
while 1:
in_ch = cv2.waitKey(10)
if in_ch == 115: # 's'
if args.save_dir:
print('save: ', frame_num)
if args.tssd == 'ssd':
save_tuple =(objects, up_attmap) if args.attention else (objects,)
torch.save(save_tuple, os.path.join(args.save_dir, 'ssd_%s.pkl' % str(frame_num)))
else:
cv2.imwrite(os.path.join(args.save_dir, '%s.jpg' % str(frame_num)), frame)
save_tuple =(objects,state, up_attmap) if args.attention else (objects, state)
torch.save(save_tuple, os.path.join(args.save_dir, '%s.pkl' % str(frame_num)))
# cv2.imwrite('./11.jpg', frame)
elif in_ch == 32:
break
cap.release()
cv2.destroyAllWindows()
| null |
test_video.py
|
test_video.py
|
py
| 10,108 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.set_default_tensor_type",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.set_default_tensor_type",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "data.VID_CLASSES",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "data.VID_CLASSES",
"line_number": 81,
"usage_type": "argument"
},
{
"api_name": "data.MOT_CLASSES",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "data.MOT_CLASSES",
"line_number": 84,
"usage_type": "argument"
},
{
"api_name": "data.base_transform",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.upsample",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.upsample",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "ssd.build_ssd",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.backends.cudnn.benchmark",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "torch.backends.cudnn",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_POS_FRAMES",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "layers.modules.AttentionLoss",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "cv2.fillConvexPoly",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "data.VID_CLASSES_name",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "numpy.around",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_DUPLEX",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "numpy.around",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 234,
"usage_type": "call"
}
] |
141011904
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from xml.etree import ElementTree
from lxml import etree
import requests as rq
def dfs(element, level, colors):
level += 1
for child in element:
if child.attrib['color'] == 'blue':
colors[2] += level
if child.attrib['color'] == 'red':
colors[0] += level
if child.attrib['color'] == 'green':
colors[1] += level
dfs(child, level, colors)
root = ElementTree.fromstring(input())
level = 1
colors = [0, 0, 0]
if root.attrib['color'] == 'blue':
colors[2] += 1
if root.attrib['color'] == 'red':
colors[0] += 1
if root.attrib['color'] == 'green':
colors[1] += 1
dfs(root, level, colors)
print(f'{colors[0]} {colors[1]} {colors[2]}')
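# Example (illustrative): for the input
#   <a color="red"><b color="blue"><c color="green"/></b></a>
# red scores 1 (depth 1), blue 2 and green 3, so the script prints "1 3 2".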
# In[ ]:
| null |
module3/xml(1).py
|
xml(1).py
|
py
| 782 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 24,
"usage_type": "name"
}
] |
74466009
|
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from useless_ids import useless_ids
from collections.abc import Iterable  # `Iterable` moved to collections.abc in Python 3.3
class SplitBaselineConfig:
def __init__(self,
atlas_roi_path="atlas_roi.csv",
atlas=["AAL"],
ctype="all",
out_dim = 3,
dropout = 0.6
):
if isinstance(atlas,Iterable):
self.atlas = [item.lower() for item in atlas]
else:
if atlas.lower() == "all":
self.atlas = "all"
else:
self.atlas = [atlas.lower()]
self.ctype = ctype
self.atlas_roi_path = atlas_roi_path
self.out_dim = out_dim
self.dropout = dropout
def initialize(self):
self.x_ids = self.cal_x_ids()
self.in_dim = len(self.x_ids)
self.n_hidden = min(self.in_dim // 8,512)
self.n_hidden = max(self.n_hidden,32)
def atlas_contain(self,atlas):
"""judge if atlas contains in atlas
"""
if self.atlas == "all":
return True
else:
for item in self.atlas:
if item.lower() in atlas.lower():
return True
return False
def cal_x_ids(self):
atlas_roi_df = pd.read_csv(self.atlas_roi_path)
ctype = self.ctype.lower()
x_ids = []
if ctype == "gmv":
for i in range(len(atlas_roi_df)):
if "mesh" in atlas_roi_df["Atlas"][i] and self.atlas_contain(atlas_roi_df["Atlas"][i]):
x_ids.append(atlas_roi_df["dim"][i])
elif ctype == "ct":
for i in range(len(atlas_roi_df)):
if "mesh" not in atlas_roi_df["Atlas"][i] and self.atlas_contain(atlas_roi_df["Atlas"][i]):
x_ids.append(atlas_roi_df["dim"][i])
else:
for i in range(len(atlas_roi_df)):
if self.atlas_contain(atlas_roi_df["Atlas"][i]):
x_ids.append(atlas_roi_df["dim"][i])
# filter useless id here
new_ids = []
for ids in x_ids:
if ids in useless_ids:
continue
else:
new_ids.append(ids)
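        # e.g. with atlas=["Hammers"] and ctype="all", this keeps every row of
        # atlas_roi.csv whose Atlas column contains "hammers", then drops the
        # indices listed in useless_ids.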
return np.array(new_ids)
class FullResLayer(nn.Module):
def __init__(self,in_dim,dropout):
super().__init__()
self.net = nn.Sequential(
nn.Linear(in_dim,in_dim),
nn.ELU(),
nn.Dropout(dropout),
nn.Linear(in_dim,in_dim)
)
self.relu = nn.ELU()
def forward(self,x):
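        # residual connection: add the block's input to its output, then ELU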
return self.relu(self.net(x) + x)
class SplitBaseline(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.x_ids = config.x_ids
self.net = nn.Sequential(
nn.Dropout(self.config.dropout),
nn.Linear(self.config.in_dim, self.config.n_hidden),
nn.ELU(),
nn.Dropout(self.config.dropout),
FullResLayer(self.config.n_hidden,self.config.dropout),
nn.Dropout(self.config.dropout),
FullResLayer(self.config.n_hidden,self.config.dropout),
nn.Dropout(self.config.dropout),
nn.Linear(self.config.n_hidden, self.config.out_dim),
nn.Softmax(dim=-1)
)
def _preprocess(self,x):
if len(x.shape) == 1:
return x[self.x_ids]
if len(x.shape) == 2:
return x[:,self.x_ids]
def forward(self, x):
"""
args:
x : input of feaure
return:
x : possibility of three class
Shape:
input : [batch_size, in_dim]
output : [batch_size,out_dim]
"""
x = self._preprocess(x)
x = self.net(x)
return x
def test():
config = SplitBaselineConfig(atlas=["Hammers"])
config.initialize()
print(config.__dict__)
model = SplitBaseline(config)
model.load_state_dict(torch.load("model.pth", map_location ='cpu'))
model.eval()
x = np.load("../data/train/Subject_0004.npy")
x = np.nan_to_num(x, nan=1.e-10, posinf=1.e-10, neginf=1.e-10)
mean = np.load("mean.npy")
std = np.load("std.npy")
x = (x - mean)/std
x = np.nan_to_num(x, nan=1.e-10, posinf=1.e-10, neginf=1.e-10)
x = torch.tensor(x).unsqueeze(0).float()
print(x.shape)
y = model(x)
print(y)
if __name__ == "__main__":
test()
| null |
code_sub/splitbaseline.py
|
splitbaseline.py
|
py
| 4,453 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.Iterable",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "pandas.read_csv",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "useless_ids.useless_ids",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.ELU",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.ELU",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "torch.nn.ELU",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.nan_to_num",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.nan_to_num",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 139,
"usage_type": "call"
}
] |
571616389
|
import unittest
from mock import MagicMock
from troposphere import (
Base64,
Ref,
)
from stacker.blueprints.base import (
Blueprint,
CFNParameter,
build_parameter,
get_local_parameters,
)
from stacker.blueprints.variables.types import (
CFNString,
EC2AvailabilityZoneNameList,
)
from stacker.exceptions import (
InvalidLookupCombination,
MissingLocalParameterException,
MissingVariable,
UnresolvedVariables,
)
from stacker.variables import Variable
from ..factories import mock_lookup
class TestLocalParameters(unittest.TestCase):
def test_default_parameter(self):
parameter_def = {"Param1": {"default": 0}}
parameters = {}
local = get_local_parameters(parameter_def, parameters)
self.assertEquals(local["Param1"], 0)
def test_missing_required(self):
parameter_def = {"Param1": {"default": 0}, "Param2": {}}
parameters = {}
with self.assertRaises(MissingLocalParameterException) as cm:
get_local_parameters(parameter_def, parameters)
self.assertEquals("Param2", cm.exception.parameter)
def test_supplied_parameter(self):
parameter_def = {"Param1": {"default": 0}, "Param2": {}}
parameters = {"Param1": 1, "Param2": 2}
local = get_local_parameters(parameter_def, parameters)
self.assertEquals(parameters, local)
class TestBuildParameter(unittest.TestCase):
def test_base_parameter(self):
p = build_parameter("BasicParam", {"type": "String"})
p.validate()
self.assertEquals(p.Type, "String")
class TestVariables(unittest.TestCase):
def test_defined_variables(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"default": "default", "type": str},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
self.assertEqual(
blueprint.defined_variables(),
blueprint.VARIABLES,
)
def test_defined_variables_subclass(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"default": 0, "type": int},
"Param2": {"default": 0, "type": int},
}
        class TestBlueprintSubclass(TestBlueprint):
            def defined_variables(self):
                variables = super(TestBlueprintSubclass,
                                  self).defined_variables()
variables["Param2"]["default"] = 1
variables["Param3"] = {"default": 1, "type": int}
return variables
        blueprint = TestBlueprintSubclass(name="test", context=MagicMock())
variables = blueprint.defined_variables()
self.assertEqual(len(variables.keys()), 3)
self.assertEqual(variables["Param2"]["default"], 1)
def test_get_variables_unresolved_variables(self):
class TestBlueprint(Blueprint):
pass
blueprint = TestBlueprint(name="test", context=MagicMock())
with self.assertRaises(UnresolvedVariables):
blueprint.get_variables()
def test_resolve_variables(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"default": 0, "type": int},
"Param2": {"type": str},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [
Variable("Param1", 1),
Variable("Param2", "${other-stack::Output}"),
Variable("Param3", 3),
]
resolved_lookups = {
mock_lookup("other-stack::Output"): "Test Output",
}
for var in variables:
var.replace(resolved_lookups)
blueprint.resolve_variables(variables)
self.assertEqual(blueprint.resolved_variables["Param1"], 1)
self.assertEqual(blueprint.resolved_variables["Param2"], "Test Output")
self.assertIsNone(blueprint.resolved_variables.get("Param3"))
def test_resolve_variables_lookup_returns_non_string(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": list},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", "${custom non-string-return-val}")]
lookup = mock_lookup("non-string-return-val", "custom",
"custom non-string-return-val")
resolved_lookups = {
lookup: ["something"],
}
for var in variables:
var.replace(resolved_lookups)
blueprint.resolve_variables(variables)
self.assertEqual(blueprint.resolved_variables["Param1"], ["something"])
def test_resolve_variables_lookup_returns_troposphere_obj(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": Base64},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", "${custom non-string-return-val}")]
lookup = mock_lookup("non-string-return-val", "custom",
"custom non-string-return-val")
resolved_lookups = {
lookup: Base64("test"),
}
for var in variables:
var.replace(resolved_lookups)
blueprint.resolve_variables(variables)
self.assertEqual(blueprint.resolved_variables["Param1"].data,
Base64("test").data)
def test_resolve_variables_lookup_returns_non_string_invalid_combo(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": list},
}
variables = [
Variable(
"Param1",
"${custom non-string-return-val},${some-stack::Output}",
)
]
lookup = mock_lookup("non-string-return-val", "custom",
"custom non-string-return-val")
resolved_lookups = {
lookup: ["something"],
}
with self.assertRaises(InvalidLookupCombination):
for var in variables:
var.replace(resolved_lookups)
def test_get_variables(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": int},
"Param2": {"type": str},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", 1), Variable("Param2", "Test Output")]
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
self.assertEqual(variables["Param1"], 1)
self.assertEqual(variables["Param2"], "Test Output")
def test_resolve_variables_missing_variable(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": int},
"Param2": {"type": str},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", 1)]
with self.assertRaises(MissingVariable):
blueprint.resolve_variables(variables)
def test_resolve_variables_incorrect_type(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": int},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", "Something")]
with self.assertRaises(ValueError):
blueprint.resolve_variables(variables)
def test_get_variables_default_value(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": int, "default": 1},
"Param2": {"type": str},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param2", "Test Output")]
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
self.assertEqual(variables["Param1"], 1)
self.assertEqual(variables["Param2"], "Test Output")
def test_resolve_variables_convert_type(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": int},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", "1")]
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
self.assertTrue(isinstance(variables["Param1"], int))
def test_resolve_variables_cfn_type(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": CFNString},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", "Value")]
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
self.assertTrue(isinstance(variables["Param1"], CFNParameter))
def test_resolve_variables_cfn_type_list(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": EC2AvailabilityZoneNameList},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", ["us-east-1", "us-west-2"])]
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
self.assertTrue(isinstance(variables["Param1"], CFNParameter))
self.assertEqual(variables["Param1"].value, ["us-east-1", "us-west-2"])
self.assertEqual(variables["Param1"].ref.data, Ref("Param1").data)
parameters = blueprint.get_cfn_parameters()
self.assertEqual(parameters["Param1"], ["us-east-1", "us-west-2"])
def test_resolve_variables_cfn_type_list_invalid_value(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": EC2AvailabilityZoneNameList},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", {"main": "us-east-1"})]
with self.assertRaises(ValueError):
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
def test_get_parameters_cfn_type_list(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": EC2AvailabilityZoneNameList},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
parameters = blueprint._get_parameters()
self.assertTrue("Param1" in parameters)
parameter = parameters["Param1"]
self.assertEqual(parameter["type"],
"List<AWS::EC2::AvailabilityZone::Name>")
def test_get_parameters_cfn_type(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": CFNString},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
parameters = blueprint._get_parameters()
self.assertTrue("Param1" in parameters)
parameter = parameters["Param1"]
self.assertEqual(parameter["type"], "String")
def test_required_parameters_cfn_type(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": CFNString},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
blueprint.setup_parameters()
self.assertEqual(blueprint.required_parameters[0][0], "Param1")
def test_get_cfn_parameters(self):
class TestBlueprint(Blueprint):
VARIABLES = {
"Param1": {"type": int},
"Param2": {"type": CFNString},
}
blueprint = TestBlueprint(name="test", context=MagicMock())
variables = [Variable("Param1", "1"), Variable("Param2", "Value")]
blueprint.resolve_variables(variables)
variables = blueprint.get_variables()
self.assertEqual(len(variables), 2)
parameters = blueprint.get_cfn_parameters()
self.assertEqual(len(parameters.keys()), 1)
self.assertEqual(parameters["Param2"], "Value")
| null |
stacker/tests/blueprints/test_base.py
|
test_base.py
|
py
| 12,254 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "stacker.blueprints.base.get_local_parameters",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "stacker.exceptions.MissingLocalParameterException",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "stacker.blueprints.base.get_local_parameters",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.get_local_parameters",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "stacker.blueprints.base.build_parameter",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "stacker.exceptions.UnresolvedVariables",
"line_number": 101,
"usage_type": "argument"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "factories.mock_lookup",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "factories.mock_lookup",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "troposphere.Base64",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "factories.mock_lookup",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "troposphere.Base64",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "troposphere.Base64",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "factories.mock_lookup",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "stacker.exceptions.InvalidLookupCombination",
"line_number": 184,
"usage_type": "argument"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "stacker.exceptions.MissingVariable",
"line_number": 211,
"usage_type": "argument"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.CFNString",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.CFNParameter",
"line_number": 261,
"usage_type": "argument"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.EC2AvailabilityZoneNameList",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.CFNParameter",
"line_number": 273,
"usage_type": "argument"
},
{
"api_name": "troposphere.Ref",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.EC2AvailabilityZoneNameList",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.EC2AvailabilityZoneNameList",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.CFNString",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.CFNString",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "stacker.blueprints.base.Blueprint",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "stacker.blueprints.variables.types.CFNString",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "mock.MagicMock",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "stacker.variables.Variable",
"line_number": 334,
"usage_type": "call"
}
] |
414012672
|
#!/usr/bin/env python
# See: https://github.com/fiedl/hole-ice-study/issues/57
# import code; code.interact(local=dict(globals(), **locals())) # like binding.pry
import sys
import pandas
flasher_data_file = "~/icecube/flasher-data/oux.63_30"
flasher_data = pandas.read_csv(flasher_data_file, delim_whitespace = True, names = ["string_number", "dom_number", "time", "charge"])
import os
import glob2
simulation_data_files = []
if len(sys.argv) > 1:
base_path = sys.argv[1]
simulation_data_files = list(glob2.glob(os.path.join(os.path.expanduser(base_path), "./**/hits.txt")))
if len(simulation_data_files) == 0:
simulation_data_files.append("~/hole-ice-study/results/flasher_pulse_widths/esca0.05_r0.5rdom_abs100_width127/hits.txt")
import numpy as np
import matplotlib.pyplot as plt
for simulation_data_file in simulation_data_files:
simulation_data = pandas.read_csv(simulation_data_file, delim_whitespace = True, names = ["string_number", "dom_number", "time", "charge"], header = 1)
# for all receiving strings, which are located around the
# sending string 63, collect the photon hits for each
# dom.
#
receiving_strings = [62, 54, 55, 64, 71, 70]
doms = range(1, 60)
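# extract_hits returns, for each DOM position 1..59, the summed charge seen
# on the six neighbouring strings divided by the total charge in the detector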
def extract_hits(data):
global receiving_strings
global doms
number_of_total_hits_in_detector = data["charge"].sum()
number_of_total_hits_in_receiving_strings = data[data.string_number.isin(receiving_strings)]["charge"].sum()
hits = []
for dom in doms:
# combine the string and DOM conditions into one boolean mask; chained
# boolean indexing on an already-filtered frame forces pandas to reindex
hits_at_this_z = data[
    data.string_number.isin(receiving_strings)
    & (data.dom_number == dom)
]["charge"].sum()
hits.append(hits_at_this_z)
relative_hits = np.asarray(hits) * 1.0 / number_of_total_hits_in_detector
return relative_hits
flasher_data_hits = extract_hits(flasher_data)
simulation_data_hits = extract_hits(simulation_data)
# prepare canvas
fig, ax = plt.subplots(facecolor="white")
ax.plot(doms, flasher_data_hits, "ro", label = "flasher data 2012, total hits = " + str(flasher_data["charge"].sum()))
ax.plot(doms, simulation_data_hits, "bo", label = "simulation, total hits = " + str(simulation_data["charge"].sum()))
ax.set_title("Comparing flasher data to simulation, emitting DOM 63_30")
ax.set_xlabel("DOM number ~ z coordinate")
ax.set_ylabel("number of hits / total number of hits in whole detector")
ax.grid()
ax.legend(loc = "upper right")
plt.show()
| null |
scripts/lib/plot_number_of_hits_on_all_neighbouring_strings.py
|
plot_number_of_hits_on_all_neighbouring_strings.py
|
py
| 2,428 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "glob2.glob",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
}
] |
252887051
|
import tensorflow as tf
from gqn.gqn_model import gqn_draw_model_fn
from gqn.gqn_params import GQN_DEFAULT_CONFIG, create_gqn_config
from data_provider.gqn_tfr_provider import gqn_input_fn
import matplotlib.pyplot as plt
import numpy as np
MODEL_DIR='/home/cylee/gqn/models'
DATA_DIR='/home/cylee/gqn/gqn-dataset'
DATASET='rooms_ring_camera'
#params = create_gqn_config()
params = GQN_DEFAULT_CONFIG
estimator = tf.estimator.Estimator(
model_fn=gqn_draw_model_fn,
model_dir=MODEL_DIR,
params={'gqn_params' : params, 'debug' : False})
input_fn = lambda mode: gqn_input_fn(
dataset=DATASET,
#context_size=params['CONTEXT_SIZE'],
context_size=params.CONTEXT_SIZE,
root=DATA_DIR,
mode=mode)
for prediction in estimator.predict(input_fn=input_fn):
# prediction is the dict @ogroth was mentioning
print(prediction['predicted_mean']) # this is probably what you want to look at
print(prediction['predicted_variance']) # or use this to sample a noisy image
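# a hedged sketch (not in the original) of the noisy sampling hinted at above,
# assuming 'predicted_variance' is a per-pixel Gaussian variance:
# noisy = np.random.normal(prediction['predicted_mean'], np.sqrt(prediction['predicted_variance']))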
a = prediction['predicted_mean']
print(type(a))
print(a.shape)
plt.imsave('fig.png', a)
break
| null |
www.py
|
www.py
|
py
| 1,137 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gqn.gqn_params.GQN_DEFAULT_CONFIG",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tensorflow.estimator.Estimator",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "gqn.gqn_model.gqn_draw_model_fn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "data_provider.gqn_tfr_provider.gqn_input_fn",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imsave",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
}
] |
98556511
|
import torch
import torch.nn.functional as F
from torch_geometric.data import Data
from gnn import Graph
import json
import string
import pickle
import random
random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str)
parser.add_argument('--rule_type', type=str)
parser.add_argument('--label_type', type=str)
args = parser.parse_args()
with open('config-{}.json'.format(args.label_type), 'r') as f:
config = json.loads(f.read())
dataset = args.dataset
rule_type = args.rule_type
label_type = args.label_type
output_directory = '../candidates/{}'.format(args.dataset)
if not rule_type in config:
print("No {} pattern propagation, please checn configuration file for available patterns. ".format(rule_type))
train_file_path = '../datasets/{}/train.pickle'.format(dataset)
dev_file_path = '../datasets/{}/dev.pickle'.format(dataset)
nodes2idx_path, pos_seeds_path, neg_seeds_path, nodes_embedding_path, edge_path = None, None, None, None, None
# if not pattern_type.startswith('SurfaceForm'):
# tmp_pattern_type = '_'.join(pattern_type.split('_')[:-1])
# nodes2idx_path = 'cached_embeddings/{}_node2idx.pk'.format(tmp_pattern_type)
# pos_seeds_path = 'cached_embeddings/{}_Pos_Seeds.pk'.format(tmp_pattern_type)
# neg_seeds_path = 'cached_embeddings/{}_Neg_Seeds.pk'.format(tmp_pattern_type)
# nodes_embedding_path = 'cached_embeddings/{}_node_embeddings.pk'.format(tmp_pattern_type)
# edge_path = 'cached_embeddings/{}_edges.pk'.format(tmp_pattern_type)
# else:
nodes2idx_path = '../cached_seeds_and_embeddings/{}/{}_{}_node2idx.pk'.format(dataset, label_type, rule_type)
pos_seeds_path = '../cached_seeds_and_embeddings/{}/{}_{}_Pos_Seeds.pk'.format(dataset, label_type, rule_type)
neg_seeds_path = '../cached_seeds_and_embeddings/{}/{}_{}_Neg_Seeds.pk'.format(dataset, label_type, rule_type)
nodes_embedding_path = '../cached_seeds_and_embeddings/{}/{}_{}_node_embeddings.pk'.format(dataset, label_type, rule_type)
edge_path = '../cached_seeds_and_embeddings/{}/{}_{}_edges.pk'.format(dataset, label_type, rule_type)
epochs = config[rule_type]['epochs']
num_of_pattern_to_save = config[rule_type]['num_of_pattern_to_save']
num_round_to_integrate = config[rule_type]['num_round_to_integrate']
group_total = 5
output_file_prefix = "{}_{}".format(label_type, rule_type)
"""
NOTE: to increase model stability and performance, for each group, we will actually train the model 5 (num_round_to_integrate) times to integrate propagation results. Then we will run 5 groups to calculate model performance mean and standard deviation.
NOTE: the output files will be saved in the format:
<output_directory>/<output_file_prefix>_g<group>_r<1..5>.txt.
e.g. candidates/SurfaceForm_g1_r1.txt.
"""
for group in range(1, group_total+1):
with open(train_file_path, 'rb') as f:
train_data = pickle.load(f)
with open(dev_file_path, 'rb') as f:
dev_data = pickle.load(f)
print('Train: ', len(train_data))
print('Dev: ', len(dev_data))
## Load cached nodes and edges data
with open(nodes2idx_path, 'rb') as f:
node2idx = pickle.load(f)
idx2node = {v:k for k,v in node2idx.items()}
print("node2idx: ", len(node2idx))
with open(pos_seeds_path, 'rb') as f:
pos_seeds = list(pickle.load(f))
print("pos_seeds: ", len(pos_seeds))
with open(neg_seeds_path, 'rb') as f:
neg_seeds = list(pickle.load(f))
print("neg_seeds: ", len(neg_seeds))
with open(nodes_embedding_path, 'rb') as f:
node2emb = pickle.load(f)
print("node2emb: ", len(node2emb))
with open(edge_path, 'rb') as f:
edge_index = pickle.load(f)
print("edge_index: ", edge_index.shape)
# create label
y = []
for i in range(len(node2idx)):
node = idx2node[i]
if node in pos_seeds:
y.append(1)
elif node in neg_seeds:
y.append(0)
else:
y.append(-1)
random.shuffle(pos_seeds)
random.shuffle(neg_seeds)
pos_train_percent = config[rule_type]['pos_train_percent'] #0.8 (surface)
neg_train_percent = config[rule_type]['neg_train_percent'] #0.8 (surface)
train_pos_seeds = pos_seeds[:int(len(pos_seeds)*pos_train_percent)]
test_pos_seeds = pos_seeds[int(len(pos_seeds)*pos_train_percent):]
train_neg_seeds = neg_seeds[:int(len(neg_seeds)*neg_train_percent)]
test_neg_seeds = neg_seeds[int(len(neg_seeds)*neg_train_percent):]
print("pos train num: ", len(train_pos_seeds), "neg train num:", len(train_neg_seeds))
print("pos test num: ", len(test_pos_seeds), "neg test num:", len(test_neg_seeds))
train_list = train_pos_seeds + train_neg_seeds
test_list = test_pos_seeds + test_neg_seeds
print(len(train_list))
print(len(test_list))
train_idx_list = [node2idx[w] for w in train_list if w in node2idx]
test_idx_list = [node2idx[w] for w in test_list if w in node2idx]
## build graph
y_longtensor = torch.LongTensor(y)
y_tensor = torch.Tensor(y)
train_mask = torch.BoolTensor([False]*len(y))
train_mask[train_idx_list] = True
test_mask = torch.BoolTensor([False]*len(y))
test_mask[test_idx_list] = True
train_pos_mask = torch.BoolTensor([False]*len(y))
train_pos_mask[[node2idx[w] for w in train_pos_seeds if w in node2idx]] = True
train_neg_mask = torch.BoolTensor([False]*len(y))
train_neg_mask[[node2idx[w] for w in train_neg_seeds if w in node2idx]] = True
graph_data = Data(
x=node2emb, edge_index=edge_index, y_longtensor=y_longtensor, y_tensor=y_tensor,
train_mask=train_mask, test_mask=test_mask,
train_pos_mask=train_pos_mask, train_neg_mask=train_neg_mask
)
for r in range(1, num_round_to_integrate+1):
torch.cuda.empty_cache()
model = Graph(node_feature_dim=2048, output_dim=1).to(device)
graph_data = graph_data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=5e-4)
sim_function = torch.nn.CosineSimilarity(dim=0, eps=1e-6)
model.train()
for epoch in range(epochs):
optimizer.zero_grad()
logit, out = model(graph_data)
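# three training signals: supervised BCE on the labelled seed nodes, an MSE
# smoothness term across graph edges, and the cosine similarity between the
# positive and negative centroids (minimised, pushing the clusters apart)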
loss = F.binary_cross_entropy(F.sigmoid(logit[graph_data.train_mask]), graph_data.y_tensor[graph_data.train_mask].unsqueeze(1))
loss2 = F.mse_loss(F.sigmoid(out[graph_data.edge_index[0]]), F.sigmoid(out[graph_data.edge_index[1]]))
loss3 = sim_function(out[graph_data.train_pos_mask].mean(dim=0), out[graph_data.train_neg_mask].mean(dim=0))
total_loss = loss + loss2 + loss3
print(total_loss.item())
total_loss.backward()
optimizer.step()
sim_function = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
model.eval()
logit, out = model(graph_data)
pos_centroid = out[graph_data.train_pos_mask].mean(dim=0)
neg_centroid = out[graph_data.train_neg_mask].mean(dim=0)
print(sim_function(pos_centroid.unsqueeze(0), neg_centroid.unsqueeze(0)))
dis2pos = sim_function(out, pos_centroid.unsqueeze(0).expand(out.shape[0], -1))
dis2neg = sim_function(out, neg_centroid.unsqueeze(0).expand(out.shape[0], -1))
pred = torch.zeros(out.shape[0]).to(device)
pred[dis2pos>dis2neg] = 1
correct = float(pred[graph_data.test_mask].eq(graph_data.y_longtensor[graph_data.test_mask]).sum().item())
acc = correct / graph_data.test_mask.sum().item()
print('Accuracy: {:.4f}'.format(acc))
dist_diff = dis2pos - dis2neg
propogated = [w for w, _ in sorted([(idx2node[ix], diff) for ix, diff in enumerate(dist_diff.tolist())], key=lambda item:item[1], reverse=True)[:num_of_pattern_to_save]]
candidates = set([tuple(item.split()) for item in propogated])
print(len(candidates))
with open('{}/{}_g{}_r{}.txt'.format(output_directory, output_file_prefix, group, r), 'wb') as fw:
pickle.dump([item for item in propogated], fw, protocol=pickle.HIGHEST_PROTOCOL)
| null |
code-BC5CDR/propagate.py
|
propagate.py
|
py
| 8,256 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.BoolTensor",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "torch.BoolTensor",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.BoolTensor",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.BoolTensor",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "gnn.Graph",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CosineSimilarity",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.binary_cross_entropy",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.sigmoid",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.sigmoid",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.nn.CosineSimilarity",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 211,
"usage_type": "attribute"
}
] |
204629126
|
from db import db
import datetime
class RequestModel(db.Model):
__tablename__ = 'requests'
request_types = ["stub", "Coffee", "Lunch", "Movie","Beer","Game"]
id = db.Column(db.Integer, primary_key=True)
request_state = db.Column(db.Integer)
request_type = db.Column(db.Integer)
requesting_user = db.Column(db.String(80))
requested_user = db.Column(db.String(80))
request_time = db.Column(db.String(80))
request_expiration_time = db.Column(db.String(80))
event_id = db.Column(db.Integer)
delay = db.Column(db.Integer)
def __init__(self, request_state, requesting_user, requested_user, request_time, request_expiration_time,
request_type, event_id, delay=0):
self.request_state = request_state
self.requesting_user = requesting_user
self.requested_user = requested_user
self.request_time = request_time
self.request_expiration_time = request_expiration_time
if request_type in RequestModel.request_types:
self.request_type = RequestModel.request_types.index(request_type)
else:
self.request_type = request_type
self.event_id = event_id
self.delay = delay
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
def json(self):
if self.request_type in RequestModel.request_types:
string_request_type = self.request_type
elif int(self.request_type) < len(RequestModel.request_types):
string_request_type = RequestModel.request_types[self.request_type]
else:
string_request_type = 'unknown'
return {'request_state': self.request_state, 'requesting_user': self.requesting_user,
'requested_user': self.requested_user, 'request_time': self.request_time,
"request_expiration_time": self.request_expiration_time,
"request_type": string_request_type,
"id": self.id, "event_id": self.event_id,"delay": self.delay}
@classmethod
def get_received_requests(cls, username):
user_requests = cls.query.filter_by(requested_user=username).all()
return user_requests
@classmethod
def get_active_sent_requests(cls, username):
user_requests = cls.query.filter_by(requesting_user=username).all()
return user_requests
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
@classmethod
def find_by_filters(cls, requesting_user, requested_user, request_type):
print("filters: ", requesting_user,requested_user,request_type)
return cls.query.filter_by(requesting_user=requesting_user, requested_user=requested_user,
request_type=request_type).first()
@classmethod
def find_by_event_id(cls, event_id):
return cls.query.filter_by(event_id=event_id).all()
@classmethod
def expiration_to_datetime(cls, expiration_time):
if not expiration_time:
return None
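# timestamps are stored as strings, with or without a microsecond part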
if "." in expiration_time:
return datetime.datetime.strptime(expiration_time, "%Y-%m-%d %H:%M:%S.%f")
else:
return datetime.datetime.strptime(expiration_time, "%Y-%m-%d %H:%M:%S")
@classmethod
def clear_expired(cls):
current_time = datetime.datetime.now()
requests = cls.query.all()
expired = []
for request in requests:
if (request.request_expiration_time != '') and \
(current_time > cls.expiration_to_datetime(request.request_expiration_time)):
expired.append(request)
for request in expired:
print('expired deleting:', request.json())
request.delete_from_db()
@classmethod
def clear_abandoned(cls, events):
requests = cls.query.all()
event_ids = [event.id for event in events]
expired = []
for request in requests:
if request.event_id not in event_ids:
expired.append(request)
for request in expired:
request.delete_from_db()
@classmethod
def filter_duplicates(cls):
filtered = {}
requests = cls.query.all()
duplicated = []
for request in requests:
filter_params = (request.requesting_user, request.requested_user, request.request_type)
if filter_params in filtered:
exp1 = cls.expiration_to_datetime(filtered[filter_params].request_expiration_time)
exp2 = cls.expiration_to_datetime(request.request_expiration_time)
if exp1 > exp2:
duplicated.append(request)
else:
duplicated.append(filtered[filter_params])
filtered[filter_params] = request
else:
filtered[filter_params] = request
for request in duplicated:
print('filter deleting:', request.json())
request.delete_from_db()
@classmethod
def replace_event(cls, old_event_id, new_event_id):
requests = cls.query.filter_by(event_id=old_event_id).all()
for request in requests:
request.event_id = new_event_id
request.save_to_db()
@classmethod
def clear_requests(cls,request_type, requesting_user=None, request_state=None, requested_user=None, event_id=None):
args = {"request_type":request_type,
"requesting_user":requesting_user,
"request_state":request_state,
"requested_user":requested_user,
"event_id":event_id}
args = {k: v for k, v in args.items() if v is not None}
print("args after:", args)
requests = cls.query.filter_by(**args).all()
for request in requests:
request.delete_from_db()
| null |
models/requests.py
|
requests.py
|
py
| 6,194 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "db.db.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "db.db.Column",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "db.db.Integer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "db.db.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "db.db.Integer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "db.db.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "db.db.Integer",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "db.db.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "db.db.String",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db.db.Column",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "db.db.String",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "db.db.Column",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "db.db.String",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "db.db.Column",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "db.db.String",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "db.db.Column",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "db.db.Integer",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "db.db.Column",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "db.db.Integer",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "db.db.session.add",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "db.db.session",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "db.db.session.commit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "db.db.session",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "db.db.session.delete",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "db.db.session",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "db.db.session.commit",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "db.db.session",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 89,
"usage_type": "attribute"
}
] |
495713248
|
## we are going to create a simple protocol for our for actions
# put - Put something into the space
# get - Retrieve something from the space to be processed, i.e. nobody else
# can then retrieve or read it
# read - As for get, but a copy stays in the space for others to read, or get
# get_wait - like get but waits for a match
# read_wait - like read but waits for a match
## requests
# PUT <bytes>\r\n\r\n<data>\r\n\r\n
# GET <bytes>\r\n\r\n<data>\r\n\r\n
# GET_WAIT <bytes>\r\n\r\n<data>\r\n\r\n
# READ <bytes>\r\n\r\n<data>\r\n\r\n
# READ_WAIT <bytes>\r\n\r\n<data>\r\n\r\n
## responses
# NOT_FOUND\r\n\r\n
# FOUND <bytes>\r\n\r\n<data>\r\n\r\n
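## example exchange (a hedged sketch, not part of the original notes; the
## byte counts and payload are invented for illustration):
# client -> server: GET 20\r\n\r\n<20 bytes of pickled tuple template>\r\n\r\n
# server -> client (match): FOUND 20\r\n\r\n<20 bytes of pickled tuple>\r\n\r\n
# server -> client (no match): NOT_FOUND\r\n\r\n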
from functools import partial
import pickle
import socket
import logging
import sys
from helpers import decode_tuple_data, encode_tuple
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
TERMINATOR = '\r\n\r\n'
class TCPTupleClient(object):
def __init__(self, host, port):
self.address = (host,port)
self.socket = None
def get_socket(self):
# no unix sockets here
if not self.socket:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(self.address)
return s
return self.socket
def make_request(self, action, t):
log.debug('making request: %s %s' % (action,t))
# get our socket
s = self.get_socket()
# send our request
d = encode_tuple(t)
s.send('%s %s%s%s%s' % (action.upper(), len(d),
TERMINATOR, d, TERMINATOR))
# wait for a response
# the response is either going to be NOT_FOUND or FOUND <size>
# so we'll read enough for FOUND and go from there
r = s.recv(6).strip() # we consume the space .. maybe
log.debug('initial response: %s' % r)
# is it found ?!
if r.lower() != 'found':
log.debug('not found')
# sadface, not found
# read out the rest of the message
s.recv(7)
return None
# if it's a put, we're done
if action.lower() == 'put':
return True
# much excitement! lets read how much data we get
# read until returns
buff = ''
while not buff.endswith(TERMINATOR):
buff += s.recv(1)
data_len = int(buff[:-len(TERMINATOR)])  # strip the trailing terminator; the old buff[:len(TERMINATOR)] truncated lengths of 5+ digits
log.debug('data len: %s' % data_len)
# read in the tuple data
tuple_data = s.recv(data_len)
log.debug('tuple data: %s' % tuple_data)
# read the newline out
s.recv(2)
# get our tuple
found_tuple = decode_tuple_data(tuple_data)
log.debug('found tuple: ' + str(found_tuple))
return found_tuple
def __getattr__(self,a,*args):
# instead of putting a function for each api method
# we are cheating and simply sticking ourself into the
# attribute lookup
cmds = ['put','get','get_wait','read','read_wait']
if a in cmds:
# w00t, doing an api call ::dance::
# return a function which calls make request
# w/ the command we want
return partial(self.make_request,a)
# ::shrug::
return super(TCPTupleClient,self).__getattr__(a,*args)
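## usage sketch (hedged; not from the original file -- the host/port and the
## idea that get/read take a tuple template are assumptions):
# client = TCPTupleClient('localhost', 9999)
# client.put(('job', 42)) # resolved via __getattr__ -> partial(make_request, 'put')
# found = client.read(('job', 42)) # returns the tuple on FOUND, None on NOT_FOUND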
| null |
client.py
|
client.py
|
py
| 3,345 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "helpers.encode_tuple",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "helpers.decode_tuple_data",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 105,
"usage_type": "call"
}
] |
355548407
|
# -*- coding: utf-8 -*-
# FeedCrawler
# Project by https://github.com/rix1337
import configparser
class CrawlerConfig(object):
_DEFAULT_CONFIG = {
'FeedCrawler': [
("auth_user", "str", ""),
("auth_hash", "str", ""),
("myjd_user", "str", ""),
("myjd_pass", "str", ""),
("myjd_device", "str", ""),
("port", "int", "9090"),
("prefix", "str", ""),
("interval", "int", "10"),
("english", "bool", "False"),
("surround", "bool", ""),
("proxy", "str", ""),
("fallback", "bool", "False"),
("closed_myjd_tab", "bool", "False"),
("one_mirror_policy", "bool", "False"),
("packages_per_myjd_page", "int", "3"),
("prefer_dw_mirror", "bool", "False")
],
'Hostnames': [
("sj", "str", ""),
("dj", "str", ""),
("sf", "str", ""),
("by", "str", ""),
("dw", "str", ""),
("fx", "str", ""),
("nk", "str", ""),
("ww", "str", ""),
("dd", "str", "")
],
'Crawljobs': [
("autostart", "bool", "True"),
("subdir", "bool", "True")
],
'Notifications': [
("homeassistant", "str", ""),
("pushbullet", "str", ""),
("telegram", "str", ""),
("pushover", "str", "")
],
'Hosters': [
("rapidgator", "bool", "True"),
("turbobit", "bool", "False"),
("uploaded", "bool", "False"),
("zippyshare", "bool", "False"),
("oboom", "bool", "False"),
("ddl", "bool", "False"),
("filefactory", "bool", "False"),
("uptobox", "bool", "False"),
("1fichier", "bool", "False"),
("filer", "bool", "False"),
("nitroflare", "bool", "False"),
("ironfiles", "bool", "False"),
("k2s", "bool", "False")
],
'Ombi': [
("url", "str", ""),
("api", "str", "")
],
'ContentAll': [
("quality", "str", "1080p"),
("search", "int", "10"),
("ignore", "str", "cam,subbed,xvid,dvdr,untouched,remux,avc,pal,md,ac3md,mic,xxx"),
("regex", "bool", "False"),
("cutoff", "bool", "True"),
("enforcedl", "bool", "False"),
("crawlseasons", "bool", "True"),
("seasonsquality", "str", "1080p"),
("seasonpacks", "bool", "False"),
("seasonssource", "str",
"web-dl.*-(tvs|4sj|tvr)|webrip.*-(tvs|4sj|tvr)|webhd.*-(tvs|4sj|tvr)|netflix.*-(tvs|4sj|tvr)|amazon.*-(tvs|4sj|tvr)|itunes.*-(tvs|4sj|tvr)|bluray|bd|bdrip"),
("imdbyear", "str", "2010"),
("imdb", "str", "0.0"),
("hevc_retail", "bool", "False"),
("retail_only", "bool", "False"),
("hoster_fallback", "bool", "False")
],
'ContentShows': [
("quality", "str", "1080p"),
("rejectlist", "str", "XviD,Subbed,HDTV"),
("regex", "bool", "False"),
("hevc_retail", "bool", "False"),
("retail_only", "bool", "False"),
("hoster_fallback", "bool", "False")
],
'CustomDJ': [
("quality", "str", "1080p"),
("rejectlist", "str", "XviD,Subbed"),
("regex", "bool", "False"),
("hoster_fallback", "bool", "False")
],
'CustomDD': [
("feeds", "str", ""),
("hoster_fallback", "bool", "False")
]
}
__config__ = []
def __init__(self, section, configfile):
self._configfile = configfile
self._section = section
self._config = configparser.RawConfigParser()
try:
self._config.read(self._configfile)
if not self._config.has_section(self._section):
    self._set_default_config(self._section)
self.__config__ = self._read_config(self._section)
except configparser.DuplicateSectionError:
print(u'Duplicate section in the configuration file.')
raise
except:
print(u'An unknown error occurred in the configuration file.')
raise
def _set_default_config(self, section):
self._config.add_section(section)
for (key, key_type, value) in self._DEFAULT_CONFIG[section]:
self._config.set(section, key, value)
with open(self._configfile, 'w') as configfile:
self._config.write(configfile)
def _set_to_config(self, section, key, value):
self._config.set(section, key, value)
with open(self._configfile, 'w') as configfile:
self._config.write(configfile)
def _read_config(self, section):
return [(key, '', self._config.get(section, key)) for key in self._config.options(section)]
def _get_from_config(self, scope, key):
res = [param[2] for param in scope if param[0] == key]
if not res:
res = [param[2]
for param in self._DEFAULT_CONFIG[self._section] if param[0] == key]
if [param for param in self._DEFAULT_CONFIG[self._section] if param[0] == key and param[1] == 'bool']:
return True if len(res) and res[0].strip('\'"').lower() == 'true' else False
else:
return res[0].strip('\'"') if len(res) > 0 else False
def save(self, key, value):
self._set_to_config(self._section, key, value)
return
def get(self, key):
return self._get_from_config(self.__config__, key)
def get_section(self):
return self._config._sections[self._section]
| null |
feedcrawler/config.py
|
config.py
|
py
| 5,812 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "configparser.RawConfigParser",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "configparser.DuplicateSectionError",
"line_number": 116,
"usage_type": "attribute"
}
] |
503176092
|
from flask import Flask, redirect, url_for
def created_app(config=None):
app = Flask(__name__)
if config is not None:
app.config.from_object(config)
from dashboard.views import dashboard
app.register_blueprint(dashboard, url_prefix='/dashboard')
@app.route('/')
def home():
return "hola"
return app
| null |
app.py
|
app.py
|
py
| 362 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "dashboard.views.dashboard",
"line_number": 10,
"usage_type": "argument"
}
] |
325690417
|
import telebot
import googletrans
from googletrans import Translator
import config
bot = telebot.TeleBot(config.TOKEN)
translator = Translator()
@bot.message_handler(commands=['start'])
def send_welcome(message):
bot.reply_to(message, "Привет, я бот-переводчик с любого языка на английский, введи слово или фразу - и я переведу её! \n /help - список поддерживающихся языков")
@bot.message_handler(commands=['help'])
def send_help(message):
support_langs = googletrans.LANGUAGES
x = ''
for key, value in support_langs.items():
x += key + ' - ' + value + '\n'
bot.reply_to(message, x)
@bot.message_handler(content_types=['text'])
def tr_eng(message):
result = translator.translate(message.text)
bot.reply_to(message, result.text)
bot.polling()
| null |
bot.py
|
bot.py
|
py
| 882 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "telebot.TeleBot",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "googletrans.Translator",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "googletrans.LANGUAGES",
"line_number": 15,
"usage_type": "attribute"
}
] |
38919193
|
import random
from typing import Callable
import pygame
from text_surface_rectangle import Colors, TextRec
from input_data import Answer, Question
class Round:
def __init__(self, setup: Callable, question: Question, answer: Answer):
self.question = question
self.answer = answer
# For questions box number should be 4
self.setup = setup(box_number=4)
self.all_answers = self._pack_to_list(
self.answer.answer, self.answer.possibilities
)
self.default_surface_settings = {
"x": self.setup.padding,
"y": None, # this should be filled by surface
"color": Colors.color_green,
"font_size": 100,
"font_name": "arial",
"text": "",
}
self.question_settings = self.default_surface_settings.copy()
self.question_settings.update(
{"y": self.setup.req_question_y, "text": self.question.question}
)
self.possible_ans_settings = self._possibilities()
self.all_settings = self._pack_to_list(
self.question_settings,
random.sample(self.possible_ans_settings, len(self.possible_ans_settings)),
)
self.question_surface, *self.answer_surfaces = self.setup.create_text_boxes(
TextRec, self.all_settings
)
@staticmethod
def _pack_to_list(*args):
all_objects = list()
for arg in args:
if not isinstance(arg, list):
all_objects.append(arg)
else:
all_objects.extend(arg)
return all_objects
def _possibilities(self):
possible_ans = list()
for answer in self.all_answers:
default_dict = self.default_surface_settings.copy()
default_dict.update({"text": answer, "y": self.setup.req_height})
possible_ans.append(default_dict)
return possible_ans
def draw(self, display):
self.question_surface.draw(display)
for answer in self.answer_surfaces:
answer.draw(display)
def _event_button_pressed(self, display, event, color):
for button in self.answer_surfaces:
if button.rect.collidepoint(*event.pos):
self._change_rect_color(button, display, color)
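# block further MOUSEBUTTONUP events from the queue (presumably so the
# click is not handled twice)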
pygame.event.set_blocked(pygame.MOUSEBUTTONUP)
return button
return None
def mark_answers(self, display, event):
button = self._event_button_pressed(
display, event=event, color=Colors.color_blue
)
if button:
self._check_answer(display, button)
# Delay to check the answer
pygame.time.delay(1000)
return True
return False
def change_to_default_color(self, display, color):
for button in self.answer_surfaces:
self._change_rect_color(button, display, color)
@staticmethod
def _change_rect_color(button, display, color):
button.rect_surf.fill(color=color)
button.draw(display)
pygame.display.flip()
def _check_answer(self, display, button):
if button.text == self.answer.answer:
self._change_rect_color(button, display, Colors.color_yellow)
pygame.time.delay(2000)
else:
self._change_rect_color(button, display, Colors.color_pink)
# Show correct answer
pygame.time.delay(1000)
self._show_correct_answer(display)
def _show_correct_answer(self, display):
for button in self.answer_surfaces:
if button.text == self.answer.answer:
self._change_rect_color(button, display, Colors.color_yellow)
| null |
rounds.py
|
rounds.py
|
py
| 3,737 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.Callable",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "input_data.Question",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "input_data.Answer",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "text_surface_rectangle.Colors.color_green",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "random.sample",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "text_surface_rectangle.TextRec",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "pygame.event.set_blocked",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONUP",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors.color_blue",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pygame.time.delay",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors.color_yellow",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "pygame.time.delay",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors.color_pink",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "pygame.time.delay",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors.color_yellow",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "text_surface_rectangle.Colors",
"line_number": 112,
"usage_type": "name"
}
] |
34551212
|
import base64
import time
import os
import cv2
from flask import Flask, request, render_template
import re
from ImageProcess.ImageProcess import ImageProcess, Calibration
from ArduinoSerial import ArduinoSerial
from PIL import Image
from io import StringIO, BytesIO
import numpy as np
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
calibration = None
@app.route('/')
def index():
return render_template('index.html')
@app.route('/capture.html', methods=['GET'])
def capture():
return render_template('capture.html')
@app.route('/apiCapture', methods=['POST'])
def get_image():
image_b64 = request.values['canvas_data']
print(image_b64)
base64_data = re.sub('^data:image/.+;base64,', '', image_b64)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
img = Image.open(image_data)
t = time.time()
imgPath= "Image/"+str(t) + '.png'
img.save(imgPath, "PNG")
analyse_img(imgPath)
#image_PIL = Image.open(StringIO(image_b64))
#image_np = np.array(image_PIL)
#print('Image received: {}'.format(image_np.shape))
return ''
def analyse_img(imgPath):
print(calibration)
imageProcess = ImageProcess(calibration.eq_x0, calibration.eq_x1, calibration.size_face)
imageProcess.load_img(imgPath)
if imageProcess.compute():
arduino.send(-imageProcess.x_delta,imageProcess.y_delta,100)
print(imageProcess)
os.remove(imgPath)
if __name__ == "__main__":
calibration = Calibration()
calibration.load(['ImageProcess/img/1m.jpg', 'ImageProcess/img/2m.jpg', 'ImageProcess/img/3m.jpg'],
[100, 200, 300])
calibration.compute()
arduino = ArduinoSerial()
arduino.connect("COM7")
app.run()
| null |
server.py
|
server.py
|
py
| 1,811 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.request.values",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ImageProcess.ImageProcess.ImageProcess",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "ImageProcess.ImageProcess.Calibration",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "ArduinoSerial.ArduinoSerial",
"line_number": 64,
"usage_type": "call"
}
] |
582111206
|
from itertools import groupby
import numpy as np
from os import listdir, makedirs
from os.path import exists, isdir, join
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk

from ramos.io.Base import DataSource, DataSink
from ramos.utils.vectors import decompose
from ramos.utils.vtk import mass_matrix, write_to_file, get_cell_indices


class VTKTimeDirsSource(DataSource):

    def __init__(self, paths):
        self.paths = paths
        # Keep only the files present in every time directory
        files = set(listdir(paths[0]))
        for path in paths[1:]:
            files = files & set(listdir(path))
        self.files = list(files)
        dataset = self.dataset(0, 0)
        xmin, xmax, ymin, ymax, zmin, zmax = dataset.GetBounds()
        # A coordinate direction counts as variate if its bounds differ
        variates = [
            abs(a - b) > 1e-5
            for a, b in ((xmin, xmax), (ymin, ymax), (zmin, zmax))
        ]
        pardim = sum(variates)
        super(VTKTimeDirsSource, self).__init__(pardim, len(paths))
        self.variates = [i for i, v in enumerate(variates) if v]
        for fi in range(len(self.files)):
            dataset = self.dataset(0, fi)
            pointdata = dataset.GetPointData()
            for i in range(pointdata.GetNumberOfArrays()):
                name = pointdata.GetArrayName(i)
                ncomps = pointdata.GetAbstractArray(i).GetNumberOfComponents()
                size = pointdata.GetAbstractArray(i).GetNumberOfTuples()
                self.add_field(name, ncomps, size, file_index=fi)

    def dataset(self, path_index, file_index):
        reader = vtk.vtkDataSetReader()
        reader.SetFileName(join(self.paths[path_index], self.files[file_index]))
        reader.Update()
        return reader.GetOutput()

    def field_mass_matrix(self, field):
        return mass_matrix(self.dataset(0, field.file_index), self.variates)

    def field_coefficients(self, field, level=0):
        dataset = self.dataset(level, field.file_index)
        pointdata = dataset.GetPointData()
        array = pointdata.GetAbstractArray(field.name)
        return vtk_to_numpy(array)

    def tesselate(self, field, level=0):
        field = self.field(field)
        dataset = self.dataset(level, field.file_index)
        points = vtk_to_numpy(dataset.GetPoints().GetData())
        x, y = (points[..., i] for i in self.variates)
        coeffs = vtk_to_numpy(dataset.GetPointData().GetAbstractArray(field.name))
        cell_indices = get_cell_indices(dataset)
        if cell_indices.shape[1] > 3:
            return (x, y), coeffs
        return (x, y, cell_indices), coeffs

    def sink(self, *args, **kwargs):
        return VTKTimeDirsSink(self, *args, **kwargs)


class VTKTimeDirsSink(DataSink):

    def __init__(self, parent, path):
        self.parent = parent
        self.path = path
        self.paths = []

    def __enter__(self):
        if not exists(self.path):
            makedirs(self.path)
        if not isdir(self.path):
            raise IOError
        return self

    def __exit__(self, type_, value, backtrace):
        pass

    def add_level(self, time):
        path = join(self.path, str(time))
        if not exists(path):
            makedirs(path)
        if not isdir(path):  # check the newly created level directory
            raise IOError
        self.paths.append(path)

    def write_fields(self, level, coeffs, fields):
        fields = [self.parent.field(f) for f in fields]
        data = list(zip(fields, decompose(fields, coeffs)))
        key = lambda d: d[0].file_index
        data = sorted(data, key=key)
        for file_index, field_data in groupby(data, key):
            dataset = self.parent.dataset(0, file_index)
            pointdata = dataset.GetPointData()
            while pointdata.GetNumberOfArrays() > 0:
                pointdata.RemoveArray(0)
            for field, coeffs in field_data:
                array = numpy_to_vtk(coeffs, deep=1)
                array.SetName(field.name)
                pointdata.AddArray(array)
            write_to_file(dataset, join(self.paths[level], self.parent.files[file_index]))
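
# Usage sketch (hypothetical directory names and variables; only the classes
# above are taken from this file):
#   src = VTKTimeDirsSource(['time-0.0', 'time-0.1'])
#   with src.sink('reduced') as sink:        # a VTKTimeDirsSink
#       sink.add_level(0.0)                  # creates reduced/0.0/
#       sink.write_fields(0, coeffs, fields)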
| null |
ramos/io/VTKTimeDirs.py
|
VTKTimeDirs.py
|
py
| 4,013 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ramos.io.Base.DataSource",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "vtk.vtkDataSetReader",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "ramos.utils.vtk.mass_matrix",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "vtk.util.numpy_support.vtk_to_numpy",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "vtk.util.numpy_support.vtk_to_numpy",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "vtk.util.numpy_support.vtk_to_numpy",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "ramos.utils.vtk.get_cell_indices",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "ramos.io.Base.DataSink",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "ramos.utils.vectors.decompose",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "vtk.util.numpy_support.numpy_to_vtk",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "ramos.utils.vtk.write_to_file",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 114,
"usage_type": "call"
}
] |
59115259
|
import xml.etree.ElementTree as ET
import urllib
from bs4 import BeautifulSoup
import urllib2
import ssl
import json
import eventlet
import shelve


def remove_non_ascii(text):
    return ''.join([i if ord(i) < 128 else ' ' for i in text])


d = shelve.open("thedictionary")
bookset = set()
urllist = []
tree = ET.parse('treegen.xml')
root = tree.getroot()

# Collect the comma-separated book titles from column 9 (index 8) of each record
for child in root:
    bookstr = child[8].text
    bookstr = remove_non_ascii(bookstr)
    bookstr = str(bookstr.encode('utf-8').decode('ascii', 'ignore'))
    if bookstr != 'N/A':
        bookList = bookstr.split(',')
        for book in bookList:
            bookset.add(book)

# Build one LibraryThing search URL per distinct title
for child in bookset:
    f = {'search': child}
    cdj = urllib.urlencode(f)
    shru = "https://www.librarything.com/ajax_newsearch.php?" + cdj + "&searchtype=media"
    urllist.append(shru)

# Disable certificate checks for the scraping session
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

for u in urllist:
    rf = urllib2.urlopen(u, context=ctx)
    rEnc = rf.read()
    rEnc = json.loads(rEnc)
    rHtml = rEnc['text'].decode('base64')
    rf.close()
    soup1 = BeautifulSoup(rHtml, "html.parser")
    if soup1.table:
        one = soup1.find_all('tr')
        td = one[0].find_all('td')
        a = td[1].find_all('a')
        d[u] = a[0].text

d.close()  # make sure the shelve is flushed to disk
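
# Shape of a generated request (hypothetical title 'Dune'):
#   https://www.librarything.com/ajax_newsearch.php?search=Dune&searchtype=media
# The endpoint answers with JSON whose 'text' field holds base64-encoded HTML;
# the first anchor of the first table row is what gets stored in the shelve.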
| null |
Scrapper.py
|
Scrapper.py
|
py
| 1,266 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "shelve.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "urllib.urlencode",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ssl.create_default_context",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ssl.CERT_NONE",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "urllib2.urlopen",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 44,
"usage_type": "call"
}
] |
235407483
|
#!/usr/bin/python
import datetime, openpyxl as xl, os
from argparse import Namespace
import code
import operator, collections, re, argparse

# Django Imports
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, connection

import contacts.models as cont
import backend.models as back

import command_utils
from transports.email import email
import transports.africas_talking.api as at


class Command(BaseCommand):
    '''Cron commands to manage project '''
    help = "Tools for doing specific tasks"

    def add_arguments(self, parser):
        parser.add_argument('-t', '--time', action='store_true', default=False, help='print timing information')
        subparsers = parser.add_subparsers(help='cron task to run')

        fix_trans = subparsers.add_parser('fix_trans', cmd=parser.cmd, help='fix translations html strings')
        fix_trans.add_argument('--dry-run', action='store_true', default=False, help='dry-run: no permanent changes')
        fix_trans.set_defaults(action='fix_trans')

        add_auto_trans = subparsers.add_parser('auto_trans', cmd=parser.cmd, help='add auto translations')
        add_auto_trans.add_argument('--dry-run', action='store_true', default=False, help='dry-run: no permanent changes')
        add_auto_trans.set_defaults(action='auto_trans')

    def handle(self, *args, **options):
        self.options = options
        start = datetime.datetime.now()
        # Dispatch to the method named by the selected sub-command
        getattr(self, options['action'])()
        if options['time']:
            self.stdout.write("Duration: {}".format((datetime.datetime.now() - start)))

    def fix_trans(self):
        """Fix translated HTML messages by stripping <br> tags and non-breaking spaces"""
        regex = re.compile(u"\xa0|<br>")  # match a non-breaking space (&nbsp;) or a <br> tag
        translations = cont.Message.objects.filter(translation_status="done")
        total, changed = 0, 0
        for msg in translations:
            total += 1
            if regex.search(msg.translated_text):
                self.stdout.write(msg.translated_text)
                changed += 1
                new_text = regex.sub(' ', msg.translated_text)
                msg.translated_text = new_text
                if not self.options['dry_run']:
                    msg.save()
                self.stdout.write(new_text)
                self.stdout.write("\n")
        self.stdout.write("Total Translations: {} Changed: {}".format(total, changed))

    def auto_trans(self):
        """ Add English translations to all auto messages

            Timing Info:
                Transaction = False | Prefetch = False
                    Queries: 6712
                    Total AutomatedMessages: 2259 English: 775 Changed: 1484 Not Found 0
                    Duration: 0:00:49.653928
                Transaction = False | Prefetch = True
                    Queries: 4454
                    Total AutomatedMessages: 2259 English: 775 Changed: 1484 Not Found 0
                    Duration: 0:00:47.388513
                Transaction = False | Prefetch = True
                    Queries: 5229
                    Total AutomatedMessages: 2259 English: 775 Changed: 1484 Not Found 0
                Transaction = True | Prefetch = True
                    Queries: 2971
                    Total AutomatedMessages: 2259 English: 775 Changed: 1484 Not Found 0
                    Duration: 0:00:03.175598
        """
        auto_messages = cont.Message.objects.filter(translation_status='auto').prefetch_related('contact')
        with transaction.atomic():
            counts = Namespace(total=0, changed=0, not_found=[], english=0)
            for msg in auto_messages:
                counts.total += 1
                if msg.contact.language == 'english':
                    counts.english += 1
                    continue
                auto_message = back.AutomatedMessage.objects.from_description(msg.auto, exact=True)
                if auto_message:
                    msg.translated_text = auto_message.english
                    counts.changed += 1
                    if not self.options['dry_run']:
                        msg.save()
                else:
                    counts.not_found.append(msg.auto)
        self.stdout.write("Queries: {}".format(len(connection.queries)))
        self.stdout.write(("Total AutomatedMessages: {0.total} English: {0.english} " +
                           "Changed: {0.changed} Not Found {1}").format(counts, len(counts.not_found)))
        if counts.not_found:
            self.stdout.write("Not Found: {}".format(len(counts.not_found)))
            for description in counts.not_found:
                self.stdout.write("\t{}".format(description))
| null |
utils/management/commands/tools.py
|
tools.py
|
py
| 4,612 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "contacts.models.Message.objects.filter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "contacts.models.Message",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "contacts.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "contacts.models.Message.objects.filter",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "contacts.models.Message",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "contacts.models",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "backend.models.AutomatedMessage.objects.from_description",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "backend.models.AutomatedMessage",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "backend.models",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.db.connection.queries",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "django.db.connection",
"line_number": 107,
"usage_type": "name"
}
] |
12890024
|
import os

from flask import send_file
from openpyxl import Workbook

from amdos import app
from amdos.models import Time


def get_stats(t: Time):
    wb = Workbook()
    wb.active.title = str(t.time_start.strftime('%d-%m-%Y %H-%M'))
    ws = wb.active
    # Header row
    ws['A1'] = "ID"
    ws['B1'] = "Type"
    ws['C1'] = "Seat"
    ws['D1'] = "Attended"
    ws['E1'] = "Booking Reference"
    ws['F1'] = "Payment Status"
    ws['G1'] = "Customer Name"
    ws['H1'] = "Email Address"
    ws['I1'] = "Phone Number"
    tickets = t.get_tickets()
    for i in range(len(tickets)):
        ticket = tickets[i]
        booking = ticket.get_booking()
        customer = ticket.get_customer()
        # Data rows start at row 2 so the header row is not overwritten
        ws.cell(i + 2, 1, ticket.id)
        ws.cell(i + 2, 2, ticket.type)
        ws.cell(i + 2, 3, ticket.seat)
        ws.cell(i + 2, 4, ticket.attended_status)
        ws.cell(i + 2, 5, booking.booking_reference)
        ws.cell(i + 2, 6, booking.payment_status)
        ws.cell(i + 2, 7, customer.full_name)
        ws.cell(i + 2, 8, customer.email_address)
        ws.cell(i + 2, 9, customer.phone_number)
    return wb


@app.route('/time/<id>/stats')
def stats_time(id):
    get_stats(Time.query.get_or_404(id)).save(os.path.join(app.root_path, '.stats-temp.xlsx'))
    return send_file('.stats-temp.xlsx', attachment_filename='amdos stats.xlsx')
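
# Request sketch: GET /time/42/stats (hypothetical id) downloads the workbook
# as 'amdos stats.xlsx'; note the temp file is written under app.root_path but
# served via a relative path, so both must resolve to the same directory.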
| null |
amdos/routes/stats.py
|
stats.py
|
py
| 1,321 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "amdos.models.Time",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "amdos.models.Time.query.get_or_404",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "amdos.models.Time.query",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "amdos.models.Time",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "amdos.app.root_path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "amdos.app",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "flask.send_file",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "amdos.app.route",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "amdos.app",
"line_number": 44,
"usage_type": "name"
}
] |
389289717
|
import numpy as np
from scipy.spatial import distance_matrix


def pq(data, P, init_centroids, max_iter):
    """Product quantisation: K-median clustering on each of the P sub-vectors."""
    data = np.array(data, dtype='float32')
    init_centroids = np.array(init_centroids, dtype='float32')
    N, M = data.shape
    K = 256
    MP = int(M / P)
    result_centroids = np.array([])
    for i in range(P):
        # Work on the i-th slice of the feature space
        sliced_data = data[:, i * MP:(i + 1) * MP]
        centroids = init_centroids[i, ].copy()
        for _ in range(max_iter):
            # Assign each point to its nearest centroid (L1 distance)
            row_dis = distance_matrix(sliced_data, centroids, 1)
            row_label = np.argmin(row_dis, axis=1)
            # Move each centroid to the median of its cluster
            for j in range(K):
                index_list = np.where(row_label == j)[0]
                temp_data = [sliced_data[index] for index in index_list]
                if len(temp_data) != 0:
                    centroids[j] = np.median(temp_data, axis=0)
        # Final assignment with the converged centroids
        row_dis = distance_matrix(sliced_data, centroids, 1)
        row_label = np.argmin(row_dis, axis=1)
        if i == 0:
            result_label = np.array([row_label]).T.copy()
            result_centroids = np.array([centroids.copy()])
        else:
            temp = np.append(result_centroids, centroids)
            dim = result_centroids.shape
            result_centroids = temp.reshape(dim[0] + 1, dim[1], dim[2])
            result_label = np.column_stack((result_label, row_label)).astype(np.uint8)
    return result_centroids, result_label


def index_conv(index, sorted_matrix):
    # Map positions in the per-part sorted order back to centroid indices
    return [sorted_matrix[i][index[i]] for i in range(len(index))]


def distance(sorted_index, dis_matrix):
    # Total distance of a candidate code: sum of the per-part distances
    return sum([dis_matrix[i][sorted_index[i]] for i in range(len(sorted_index))])


def query(queries, codebooks, codes, T):
    Q, M = queries.shape
    P, K, _ = codebooks.shape
    N, P = codes.shape
    MP = int(M / P)
    candidates = []
    # Inverted index: PQ code tuple -> list of data point ids
    inverted_index = {}
    for i in range(N):
        if tuple(codes[i]) not in inverted_index.keys():
            inverted_index[tuple(codes[i])] = [i]
        else:
            inverted_index[tuple(codes[i])].append(i)
    # Unit vectors used to advance the multi-sequence search one step per part
    one_list = np.array([np.zeros((P)).astype(np.int32)] * P)
    for i in range(P):
        one_list[i][i] = 1
    for q in range(Q):
        # Per-part distances from the query slice to every centroid,
        # plus the centroid order sorted by that distance
        for i in range(P):
            sliced_data = queries[q, i * MP:(i + 1) * MP]
            row_dis = distance_matrix([sliced_data], codebooks[i], 1)
            sort_index = row_dis[0].argsort()
            if i == 0:
                sorted_matrix = np.array([sort_index])
                dis_matrix = np.array(row_dis)
            else:
                sorted_matrix = np.append(sorted_matrix, [sort_index], axis=0)
                dis_matrix = np.append(dis_matrix, row_dis, axis=0)
        # Multi-sequence search: expand candidate codes in order of distance
        distance_dict = {(0,) * P: distance(index_conv([0] * P, sorted_matrix), dis_matrix)}
        used_index = {}
        temp_candidate = set()
        while len(temp_candidate) < T:
            minimal_index = min(distance_dict, key=distance_dict.get)
            minimal_invert_index = tuple([sorted_matrix[i][minimal_index[i]] for i in range(len(minimal_index))])
            if minimal_invert_index in inverted_index.keys():
                add_set = tuple(inverted_index[minimal_invert_index])
                temp_candidate = temp_candidate.union(add_set)
            distance_dict.pop(minimal_index)
            used_index[minimal_index] = True
            for one in one_list:
                new_index = tuple(one + list(minimal_index))
                if new_index not in used_index.keys() and max(new_index) < 256:
                    distance_dict[new_index] = distance(index_conv(new_index, sorted_matrix), dis_matrix)
        candidates.append(temp_candidate)
    return candidates
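
# Minimal usage sketch (illustrative shapes only, not part of the original):
#   data = np.random.rand(1000, 64).astype('float32')
#   init = np.random.rand(2, 256, 32).astype('float32')  # P=2, K=256, M/P=32
#   codebooks, codes = pq(data, P=2, init_centroids=init, max_iter=10)
#   candidates = query(np.random.rand(5, 64).astype('float32'), codebooks, codes, T=10)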
| null |
Project/submission.py
|
submission.py
|
py
| 3,700 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance_matrix",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance_matrix",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.column_stack",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial.distance_matrix",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 83,
"usage_type": "call"
}
] |
573429282
|
#!/usr/bin/env python
"""
Created:
Author: ???
Date: ???
Description: (LRP)
EMIR ETC python main script. These routines consist of an underlying python
script to calculate exposure times for EMIR Photometry and Spectroscopy with
a wrapper (also in python) to make the scripts usable online
The underlying python script was written by Carlos Gonzalez-Fernandez
(cambridge) and the wrapper was written by Matteo Miluzio (ESAC)

v1.0.8 14-04-2017
    Standard structure for all filter files
    Improved standardisation of etc_modules
    Added additional output for spectroscopy
    Corrected bugs that distorted S/N estimate
    Added new sky emission and transmission files at below 1.0um

v1.0.7 20-03-2017
    Used standard structure for (some) filter files
    (others need to be implemented)
    Corrected a bug regarding the median values of the S/N

v.1.0.6 27-01-2017
    Updated the previous change to the resolution element calculation.
    More updates of code standardisation (still not complete!)

v.1.0.5 09-12-2016
    In this file, updated the way the resolution element is calculated in
    getSpecSton

v.1.0.4 08-12-2016
    Major updates of code standardisation
    Merged with ETC from Marc Balcells
    Much more output for spec. mode

v.1.0.3 05-12-2016
    Included fix for using 'Model file' that now does not depend upon the
    input magnitude
    Added F123M filter at request of Marc Balcells
    Updated 07-12-2016 to correct F123M transmission

TODO: Make better naming choices for variables
      Work out where the slowest parts of this code are
      Trim down the ETC to speed up the process

Author: LRP [email protected]
Date: 28-11-2016
Description:
Updated to correct the output exposure times in output xml file and figures
Updated to add the Y filter as an option in photometry
Added version number as v1.0
"""
import numpy as np
import sys
import xml.etree.ElementTree as ET

from optparse import OptionParser

import emir_guy
import etc_config as con
import etc_modules as mod
from etc_classes import SpecCurve

import matplotlib
matplotlib.use('Agg')  # Do we actually need agg?
import matplotlib.pylab as plt

description = ">> Exposure Time Calculator for EMIR. Contact Lee Patrick"
usage = "%prog [options]"

if len(sys.argv) == 1:
    print(description)  # no job name supplied: show a hint and quit
    sys.exit()
parser = OptionParser(usage=usage, description=description)
parser.add_option("-d", "--directory", dest="directory",
                  default='', help='Path of the xml file \n [%default]')
option, args = parser.parse_args()
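
# Invocation sketch: the script expects a job name as its first argument,
# e.g. `python etc_gui.py myjob` (hypothetical name) reads myjob.xml and
# writes myjob_out.xml plus myjob_photo.png or myjob_spec.png.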
class EmirGui:
    """GUI for the ETC"""

    def __init__(self):
        """Initialise"""
        # When the application is loaded, all the fixed elements of the system
        # (optics, etc.) plus the sky curves are loaded
        global ff
        config_files = con.get_config()
        # Changed to 80000 by LRP on 04-04-2017
        self.ldo_hr = (8000 + np.arange(100001)*0.2)*1e-4

        # Fixed elements of the system
        qe = SpecCurve(config_files['qe'])
        optics = SpecCurve(config_files['optics'])
        tel = SpecCurve(config_files['telescope'])

        # Addition from MCB's ETC by LRP
        self.qe_hr = qe.interpolate(self.ldo_hr)
        self.optics_hr = optics.interpolate(self.ldo_hr)
        self.tel_hr = tel.interpolate(self.ldo_hr)
        self.trans = self.qe_hr*self.optics_hr*self.tel_hr
        # End addition

        # Vega spectrum for normalizations
        self.vega = SpecCurve(config_files['vega']).interpolate(self.ldo_hr)

        try:
            ff = emir_guy.readxml(args[0] + '.xml')
        except:
            print("ERROR opening XML file")
            exit()
        emir_guy.load(self)
        emir_guy.check_inputs(ff, args[0])

        # Functions for options:
        if ff['operation'] == 'Photometry':
            self.doPhotometry()
        elif ff['operation'] == 'Spectroscopy':
            self.doSpectroscopy()

    def doPhotometry(self):
        """Photometry initialisations"""
        self.mode_oper = 'ph'
        # Obtaining configuration parameters from GUI
        self.mag = float(ff['magnitude'])
        self.seeing = float(ff['seeing'])
        self.airmass = float(ff['airmass'])
        self.sky_t, self.sky_e = mod.interpolatesky(self.airmass, self.ldo_hr)
        self.filtname = ff['photo_filter']
        self.buildObj()

        # We have to break the texp into its bits
        temp = ff['photo_exp_time'].split('-')
        if len(temp) == 1:
            # This creates a one length array, so that len(texp) doesn't crash
            self.texp = np.array([float(temp[0])])
            self.timerange = 'Single'
        else:
            tmin = float(temp[0])
            tmax = float(temp[1])
            self.texp = tmin + (tmax - tmin)*np.arange(100)/99.
            self.timerange = 'Range'

        # Number of frames
        self.nobj = float(ff['photo_nf_obj'])
        self.nsky = float(ff['photo_nf_sky'])

        # Filter transmission curve
        self.filt = con.get_filter(self.filtname)
        self.filt_hr = self.filt.interpolate(self.ldo_hr)

        # Calling the function that calculates the STON
        ston, signal_obj, signal_sky, saturated,\
            params = self.getPhotSton(self.texp, self.nobj, self.nsky)
        if self.timerange == 'Range':
            # self.printResults(self.texp,ston,saturated)
            self.printXML(self.texp, signal_obj, signal_sky,
                          ston, saturated, **params)
            plt.plot(self.texp*self.nobj, ston)  # Update by LRP 28-11-2016
            plt.xlabel('Exposure time (seconds)')
            if ff['source_type'] == 'Point':
                plt.ylabel('S/N')
            if ff['source_type'] == 'Extended':
                plt.ylabel('S/N per pixel')
            plt.savefig(args[0] + '_photo.png')
        else:
            self.printXML(self.texp, signal_obj, signal_sky,
                          ston, saturated, **params)
            # TODO: Create some meaningful graphic output!
    def doSpectroscopy(self):
        """Spectroscopy initialisations"""
        self.mode_oper = 'sp'
        # Obtaining configuration parameters from GUI
        self.mag = float(ff['magnitude'])
        self.seeing = float(ff['seeing'])
        self.airmass = float(ff['airmass'])
        self.slitwidth = float(ff['spec_slit_width'])
        self.slitloss = mod.slitpercent(self.seeing, self.slitwidth)
        self.sky_t, self.sky_e = mod.interpolatesky(self.airmass, self.ldo_hr)
        self.grismname = ff['spec_grism']
        self.buildObj()

        # We have to break the texp into its bits
        temp = ff['spec_exp_time'].split('-')
        if len(temp) == 1:
            # This creates a one length array, so that len(texp) doesn't crash
            self.texp = np.array([float(temp[0])])
            self.timerange = 'Single'
        else:
            tmin = float(temp[0])
            tmax = float(temp[1])
            self.texp = tmin + (tmax - tmin)*np.arange(10)/9.
            self.timerange = 'Range'

        # Number of frames
        self.nobj = float(ff['spec_nf_obj'])
        self.nsky = float(ff['spec_nf_sky'])

        # The filter transmission curve
        self.specres, self.grism, self.filt = con.get_grism(self.grismname)
        self.filt_hr = self.filt.interpolate(self.ldo_hr)
        self.grism_hr = self.grism.interpolate(self.ldo_hr)
        self.dispersive = self.filt_hr*self.grism_hr
        # Addition from MCB's ETC by LRP
        self.efftotal_hr = self.tel_hr*self.optics_hr*self.filt_hr*\
            self.grism_hr*self.qe_hr

        # Calling the function that calculates the STON
        if self.timerange == 'Single':
            ston, src_cnts, sky_cnts, sp,\
                saturated, params = self.getSpecSton(self.texp, self.nobj,
                                                     self.nsky)
            if ff['template'] == 'Emission line':
                self.printXML(self.texp, [np.max(src_cnts)],
                              [np.median(sky_cnts[np.nonzero(sky_cnts)])],
                              [np.max(ston)],
                              saturated, **params)
            else:
                self.printXML(self.texp,
                              [np.median(src_cnts[np.nonzero(src_cnts)])],
                              [np.median(sky_cnts[np.nonzero(sky_cnts)])],
                              [np.median(ston[np.nonzero(ston)])],
                              saturated, **params)

            # Create some figures:
            plt.figure(1, figsize=(15., 10.))
            plt.subplot(321)
            plt.plot(self.ldo_px, ston, color='b')
            med_spec = np.median(ston[np.nonzero(ston)])
            x_med = np.linspace(self.ldo_px[0], self.ldo_px[-1])
            plt.plot(x_med, np.linspace(med_spec, med_spec), color='r')
            plt.xlim(self.ldo_px[0], self.ldo_px[-1])
            plt.xlabel('Wavelength (micron)')
            if ff['source_type'] == 'Point':
                plt.ylabel('S/N')
            if ff['source_type'] == 'Extended':
                plt.ylabel('S/N per pixel')

            plt.subplot(323)
            plt.plot(self.ldo_px, src_cnts)
            plt.plot(self.ldo_px, sky_cnts)
            plt.xlim(self.ldo_px[0], self.ldo_px[-1])
            plt.xlabel('Wavelength (micron)')
            plt.ylabel('Source ADU/pixel')
            plt.grid()

            plt.subplot(325)
            plt.plot(self.ldo_px, sp)
            plt.xlim(self.ldo_px[0], self.ldo_px[-1])
            plt.xlabel('Wavelength (micron)')
            plt.ylabel('Normalized src flux')

            plt.subplot(322)
            plt.plot(self.ldo_hr, self.qe_hr, '-r', label='Det')
            plt.plot(self.ldo_hr, self.grism_hr, '--c', label='Grism')
            plt.plot(self.ldo_hr, self.filt_hr, '-c', label='Filter')
            plt.plot(self.ldo_hr, self.optics_hr, '-b', label='Optics')
            plt.plot(self.ldo_hr, self.tel_hr, '--b', label='Tel')
            plt.plot(self.ldo_hr, self.efftotal_hr, '-k', label='Qtot')
            plt.xlim(self.ldo_px[0], self.ldo_px[-1])
            plt.legend(bbox_to_anchor=(1.3, 1.05))
            plt.xlabel('Wavelength (micron)')
            plt.ylabel('efficiency / band')

            plt.subplot(324)
            plt.plot(self.ldo_hr, self.efftotal_hr, '-k', label='Qtot')
            plt.xlim(self.ldo_px[0], self.ldo_px[-1])
            plt.legend(bbox_to_anchor=(1.3, 1.05))
            plt.xlabel('Wavelength (micron)')
            plt.ylabel('Eff Tel to Det')

            plt.subplot(326)
            plt.plot(self.ldo_hr, self.qe_hr, '-r', label='Det')
            plt.plot(self.ldo_hr, self.grism_hr, '--c', label='Grism')
            plt.plot(self.ldo_hr, self.filt_hr, '-c', label='Filter')
            plt.plot(self.ldo_hr, self.optics_hr, '-b', label='Optics')
            plt.plot(self.ldo_hr, self.tel_hr, '--b', label='Tel')
            plt.plot(self.ldo_hr, self.efftotal_hr, '-k', label='Qtot')
            plt.legend(bbox_to_anchor=(1.3, 1.05))
            plt.xlabel('Wavelength (micron)')
            plt.ylabel('Efficiency full EMIR range')
            # End of figures

        if self.timerange == 'Range':
            ston = np.zeros_like(self.texp)
            saturated = np.zeros_like(self.texp)
            src_med_cnts = np.zeros_like(self.texp)
            sky_med_cnts = np.zeros_like(self.texp)
            for i in range(len(self.texp)):
                temp, src_cnts, sky_cnts, sp, satur, params\
                    = self.getSpecSton(self.texp[i], self.nobj, self.nsky)
                ston[i] = np.median(temp[np.nonzero(temp)])
                saturated[i] = satur
                src_med_cnts[i] = np.median(src_cnts[np.nonzero(src_cnts)])
                sky_med_cnts[i] = np.median(sky_cnts[np.nonzero(sky_cnts)])
            self.printXML(self.texp, src_med_cnts, sky_med_cnts,
                          ston, saturated, **params)
            # self.printXML(self.texp,src_cnts,sky_cnts,ston,saturated,params)
            # Re-evaluate the last exposure time for the plots below
            temp, src_cnts, sky_cnts, sp, temp2, params\
                = self.getSpecSton(self.texp[i], self.nobj, self.nsky)

            # Additional figure for an input range of exposure times
            plt.figure(1)
            plt.subplot(211)
            plt.plot(self.ldo_px, temp)
            plt.xlabel('Wavelength (micron)')
            if ff['source_type'] == 'Point':
                plt.ylabel('S/N at texp = {0:.1f}'.format(self.texp[-1]))
            if ff['source_type'] == 'Extended':
                plt.ylabel('S/N per pixel at texp = {0:.1f}'
                           .format(self.texp[-1]))
            plt.subplot(212)
            plt.plot(self.ldo_px, sp)
            plt.xlabel('Wavelength (micron)')
            plt.ylabel('Normalized src flux')
        plt.savefig(args[0]+'_spec.png')
    def getSpecSton(self, texp=1, nobj=1, nsky=1):
        """For Spectroscopy Get SignaltoNoise (Ston)"""
        params = con.get_params()
        # The skymagnitude works because the catchall is Ks, and
        # the other grisms have the same name as their respective filters
        self.mag_sky = con.get_skymag(self.grismname)

        # 1.- Calculate the wavelengths visible in the detector
        self.cenwl = (self.ldo_hr*self.dispersive).sum()/(self.dispersive).sum()
        self.dpx = (self.cenwl/self.specres)/3.
        self.res_ele = self.dpx*(self.slitwidth/(params['scale']))
        self.ldo_px = (np.arange(2048) - 1024)*self.dpx + self.cenwl

        # 2.- Scale object & sky with Vega. Note: the per angstrom dependence
        # of the SED is removed later, when the ldo per pixel is calculated
        # In case of an emission line, there is no need to re-normalize
        # CGF 02/12/16
        if ff['template'] == 'Emission line':
            no = self.obj*params['area']
        elif (ff['template'] == 'Model file') & \
                (self.obj_units != 'normal_photon'):
            no = self.obj*params['area']
        else:
            no = (10**(-1*self.mag/2.5))*\
                mod.vega(self.obj, self.vega, self.filt_hr)*params['area']

        # 3.- Convolve the SEDs with the proper resolution
        # Delta(lambda) is evaluated at the central wavelength
        con_obj = mod.convolres(self.ldo_hr,
                                texp*self.slitloss*(no*self.dispersive*
                                                    self.trans*self.sky_t),
                                self.res_ele)

        # Added by LRP from FGL's code:
        # Get this working for the HK grism then we can tune it up and add the
        # YJ grisms
        if self.grismname == 'HK':
            # Split spectra into two parts:
            # lambda:
            lhr_b = np.arange(12000, 18998, .2)*1e-4
            lhr_r = np.arange(19000, 27000, .2)*1e-4
            # Vega:
            vflux_b = mod.spec_int(self.ldo_hr, self.vega, lhr_b)
            vflux_r = mod.spec_int(self.ldo_hr, self.vega, lhr_r)
            # Sky:
            sflux_b = mod.spec_int(self.ldo_hr, self.sky_e, lhr_b)
            sflux_r = mod.spec_int(self.ldo_hr, self.sky_e, lhr_r)
            # Filter transmission:
            ftr_b = mod.spec_int(self.ldo_hr, self.filt_hr, lhr_b).clip(0, 1)
            ftr_r = mod.spec_int(self.ldo_hr, self.filt_hr, lhr_r).clip(0, 1)
            # Dispersion (filter*grism)
            disp_b = mod.spec_int(self.ldo_hr, self.dispersive, lhr_b).clip(0, 1)
            disp_r = mod.spec_int(self.ldo_hr, self.dispersive, lhr_r).clip(0, 1)
            # Telescope optics:
            otr_b = mod.spec_int(self.ldo_hr, self.trans, lhr_b).clip(0, 1)
            otr_r = mod.spec_int(self.ldo_hr, self.trans, lhr_r).clip(0, 1)
            # Calculate ns -- sky spectrum for each filter scaled to vega
            ns_b = (10**(-1*con.get_skymag('H')/2.5))*\
                mod.vega(sflux_b, vflux_b, ftr_b)*params['area']
            ns_r = (10**(-1*con.get_skymag('K')/2.5))*\
                mod.vega(sflux_r, vflux_r, ftr_r)*params['area']
            # import pdb; pdb.set_trace()
            # Sky spectra for ETC parameters
            # scale sky spectrum by ETC parameters for:
            # time exposed, optics, filter and grisms
            # disp = (filt*grism)
            sky_hr_b = texp*(ns_b*otr_b*disp_b)  # *self.slitloss*0.2
            sky_hr_r = texp*(ns_r*otr_r*disp_r)  # *self.slitloss*0.2
            # Sky spectra at correct resolution
            con_sky_b = mod.convolres(lhr_b, sky_hr_b, self.res_ele)
            con_sky_r = mod.convolres(lhr_r, sky_hr_r, self.res_ele)
            sp_sky_blue = self.dpx*mod.spec_int(lhr_b,
                                                con_sky_b*params['scale']**2,
                                                self.ldo_px)
            sp_sky_red = self.dpx*mod.spec_int(lhr_r,
                                               con_sky_r*params['scale']**2,
                                               self.ldo_px)
            # Cheap fix to an interpolation problem:
            sp_sky_blue[np.where(self.ldo_px > 1.9)] = 0.0
            sp_sky_red[np.where(self.ldo_px < 1.9)] = 0.0
            sp_sky = sp_sky_blue + sp_sky_red
        else:
            # Sky
            ns = (10**(-1*self.mag_sky/2.5))*\
                mod.vega(self.sky_e, self.vega, self.filt_hr)*params['area']
            con_sky = mod.convolres(self.ldo_hr,
                                    texp*(ns*self.dispersive*self.trans),
                                    self.res_ele)
            # 4.- Interpolate SEDs over the observed wavelengths
            # and estimate the Signal to Noise (STON)
            sp_sky = self.dpx*mod.spec_int(self.ldo_hr,
                                           con_sky*params['scale']**2,
                                           self.ldo_px)

        # No sky frame implies that reduction is as good
        # as taking one single sky frame
        if nsky == 0:
            nsky_t = 1
        else:
            nsky_t = nsky

        if ff['source_type'] == 'Point':
            sp_obj = self.dpx*mod.spec_int(self.ldo_hr, con_obj, self.ldo_px)
            im_spec = np.zeros((len(sp_obj), 100))
            im_sky = np.zeros((len(sp_obj), 100))
            total_noise = np.zeros((len(sp_obj), 100))
            for i in range(len(sp_obj)):
                im_spec[i, :] = mod.getspread(sp_obj[i], self.seeing, 0) + sp_sky[i]
                im_sky[i, :] = sp_sky[i]
                spec_noise = mod.getnoise(im_spec[i, :], texp)/np.sqrt(nobj)
                sky_noise = mod.getnoise(im_sky[i, :], texp)/np.sqrt(nsky_t)
                total_noise[i, :] = np.sqrt(spec_noise**2 + sky_noise**2)
            r = np.abs(np.arange(100) - 50)
            # Peter's recipe: aperture of radius 1.2*seeing
            ind = np.where(r <= 1.2*self.seeing/params['scale'])[0]
            # S/N calculation
            ston_sp = (im_spec - im_sky)[:, ind].sum(1)/\
                np.sqrt((total_noise[:, ind]**2).sum(1))
            satur = mod.checkforsaturation(im_spec[:, ind])
        elif ff['source_type'] == 'Extended':
            sp_obj = self.dpx*mod.spec_int(self.ldo_hr,
                                           con_obj*params['scale']**2,
                                           self.ldo_px)
            im_noise = np.sqrt((mod.getnoise(sp_obj + sp_sky, texp)/
                                np.sqrt(nobj))**2 +
                               (mod.getnoise(sp_sky, texp)/np.sqrt(nsky_t))**2)
            satur = mod.checkforsaturation(sp_obj + sp_sky)
            ston_sp = sp_obj/im_noise

        # Calculate original spectrum for display
        con_0 = mod.convolres(self.ldo_hr, self.slitloss*texp*no, self.dpx)
        # con_0 = mod.convolres(self.ldo_hr, self.slitloss*texp*no,
        #                       self.cenwl/self.specres)
        if ff['source_type'] == 'Point':
            sp_0 = mod.spec_int(self.ldo_hr, con_0, self.ldo_px)*self.dpx
        elif ff['source_type'] == 'Extended':
            sp_0 = self.dpx*mod.spec_int(self.ldo_hr, con_0*params['scale']**2,
                                         self.ldo_px)
        # Update by LRP from MBC, this function now returns more parameters
        # MB 2016-09-29 return source counts as well
        # return ston_sp, sp_0/sp_0.max(), satur
        obj_cnts = sp_obj/params['gain']
        sky_cnts = sp_sky/params['gain']
        return ston_sp, obj_cnts, sky_cnts, sp_0/sp_0.max(), satur, params
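
    # Note on the S/N above (the photometric case below follows the same
    # scheme): the signal is the sky-subtracted flux summed over the aperture
    # and the frame noise terms add in quadrature,
    #   S/N = sum(obj - sky) / sqrt(sum(noise_obj**2/nobj + noise_sky**2/nsky))
    # where mod.getnoise is assumed to fold RON, dark current and photon
    # noise into each frame's noise estimate.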
    def getPhotSton(self, texp=1, nobj=1, nsky=1):
        """For Photometry"""
        params = con.get_params()
        self.mag_sky = con.get_skymag(self.filtname)
        ston = np.zeros_like(texp)
        satur = np.zeros_like(texp)
        # Added by LRP from MBC's ETC
        signal_obj = np.zeros_like(texp)
        signal_sky = np.zeros_like(texp)

        # 1.- Scale object & sky with Vega
        trans_to_scale = self.filt_hr*self.trans
        # no=(10**(-1*self.mag/2.5))*mod.vega(self.obj,self.vega,trans_to_scale)\
        #     *params['area']*float(self.ldo_hr[1]-self.ldo_hr[0])
        #######################################################################
        # CGF 02/12/16
        if ff['template'] == 'Emission line':
            no = self.obj*params['area']*float(self.ldo_hr[1] - self.ldo_hr[0])
        elif (ff['template'] == 'Model file') & \
                (self.obj_units != 'normal_photon'):
            no = self.obj*params['area']*float(self.ldo_hr[1] - self.ldo_hr[0])
        else:
            no = (10**(-1*self.mag/2.5))\
                *mod.vega(self.obj, self.vega, trans_to_scale)\
                *params['area']*float(self.ldo_hr[1] - self.ldo_hr[0])
        #######################################################################
        ns = (10**(-1*self.mag_sky/2.5))*\
            mod.vega(self.sky_e, self.vega, trans_to_scale)\
            *params['area']*float(self.ldo_hr[1]-self.ldo_hr[0])
        if ff['template'] == 'Emission line':
            no = no + self.obj*params['area']*float(self.ldo_hr[1] -
                                                    self.ldo_hr[0])

        # 2.- Calculate total fluxes through passbands.
        # The filter appears here and in step 1 because it is used there
        # to calculate the flux under it in order to normalize the
        # spectra with Vega. Here it is used to calculate total fluxes.
        fl_obj = texp*(no*self.filt_hr*self.sky_t).sum()
        fl_sky = texp*(ns*self.filt_hr).sum()*params['scale']**2

        # In case of point-like source, we need to estimate the aperture
        # to properly account for the effect of the RON and sky.
        # In the case of extended sources, the estimated values are per pixel
        if ff['source_type'] == 'Point':
            # 3.- Synthetic image generation
            # An "image" of radii values from the center is used to see how
            # many pixels fall inside the seeing ring.
            im_r = np.zeros((100, 100))
            x = np.arange(100)
            for i in range(0, 100):
                im_r[i, ] = np.sqrt((float(i) - 50.0)**2 + (x - 50.0)**2)
            # From Peter: a good guesstimate of the aperture is 1.2*seeing
            ind = np.where(im_r <= 0.5*1.2*self.seeing / params['scale'])
            # The actual STON calculation
            for i in range(len(texp)):
                im_obj = mod.getspread(fl_obj[i], self.seeing, 1) + fl_sky[i]
                im_sky = np.zeros_like(im_obj) + fl_sky[i]
                if nsky == 0:
                    # If no sky frames are taken, the reduction is assumed
                    # to be as good as taking a single sky frame.
                    sky_noise = mod.getnoise(im_sky, texp[i])
                else:
                    sky_noise = mod.getnoise(im_sky, texp[i]) / np.sqrt(nsky)
                obj_noise = mod.getnoise(im_obj, texp[i]) / np.sqrt(nobj)
                total_noise = np.sqrt(sky_noise**2 + obj_noise**2)
                ston[i] = (im_obj - im_sky)[ind].sum()\
                    / np.sqrt((total_noise[ind]**2).sum())
                satur[i] = mod.checkforsaturation(im_obj)
                # Added by LRP from MBC's ETC
                # MBC added 2016-11-28
                # total counts from source and sky in aperture
                signal_obj[i] = (im_obj - im_sky)[ind].sum() / params['gain']
                signal_sky[i] = im_sky[ind].sum() / params['gain']
                # print('Signal_obj[i] {}'.format(signal_obj[i]))
                # print('Signal_sky[i] {}'.format(signal_sky[i]))
        elif ff['source_type'] == 'Extended':
            # For an extended source calculate the flux per pixel
            fl_obj = fl_obj*params['scale']**2
            for i in range(len(texp)):
                im_obj = np.ones(1)*(fl_obj[i] + fl_sky[i])
                im_sky = np.ones(1)*fl_sky[i]
                if nsky == 0:
                    # If no sky frames are taken, the reduction is assumed
                    # to be as good as taking a single sky frame.
                    sky_noise = mod.getnoise(im_sky, texp[i])
                else:
                    sky_noise = mod.getnoise(im_sky, texp[i]) / np.sqrt(nsky)
                obj_noise = mod.getnoise(im_obj, texp[i]) / np.sqrt(nobj)
                total_noise = np.sqrt(sky_noise**2 + obj_noise**2)
                ston[i] = (im_obj - im_sky) / total_noise
                satur[i] = mod.checkforsaturation(im_obj)
                # Added by LRP from MBC's ETC
                # MBC added 2016-11-28
                signal_obj[i] = (im_obj - im_sky) / params['gain']
                signal_sky[i] = im_sky / params['gain']
        return ston, signal_obj, signal_sky, satur, params
    def buildObj(self):
        """Build the SED from the input parameters"""
        # CGF 05/12/16
        # Default catchall so that the units are always defined
        self.obj_units = 'normal_photon'
        if ff['template'] == 'Model library':
            # CGF 05/12/16
            temp_curve = SpecCurve('libs/' + self.available[ff['model']])
            self.obj = temp_curve.interpolate(self.ldo_hr)
            self.obj_units = temp_curve.unity
        elif ff['template'] == 'Black body':
            self.bbteff = float(ff['body_temp'])
            self.obj = mod.bbody(self.ldo_hr, self.bbteff)
            # CGF 05/12/16
            self.obj_units = 'normal_photon'
        elif ff['template'] == 'Model file':
            # User loaded model
            # CGF 02/12/16
            temp_curve = SpecCurve(ff['model_file'])
            self.obj = temp_curve.interpolate(self.ldo_hr)
            self.obj_units = temp_curve.unity
        elif ff['template'] == 'Emission line':
            # LRP: I don't understand this temp business
            # ... Why do we have 3 loops that seem to do nothing???
            # It seems to think we can have multiple emission line inputs but
            # the php wrapper doesn't support this.
            #
            # The input can be several lines separated by commas
            temp = ff['line_center'].split(',')
            self.lcenter = []
            for i in temp:
                self.lcenter.append(float(i))
            temp = ff['line_fwhm'].split(',')
            self.lwidth = []
            for i in temp:
                self.lwidth.append(float(i)*1e-4)
            temp = ff['line_peakf'].split(',')
            self.lflux = []
            for i in temp:
                self.lflux.append(float(i)*1e-16)
            # In case the number of inputs is different in any section
            n_valid = np.min([len(self.lcenter), len(self.lwidth),
                              len(self.lflux)])
            self.lcenter = self.lcenter[0:n_valid]
            self.lwidth = self.lwidth[0:n_valid]
            self.lflux = self.lflux[0:n_valid]
            self.obj = np.zeros_like(self.ldo_hr)
            for i in range(len(self.lflux)):
                self.obj += mod.emline(self.ldo_hr, self.lcenter[i],
                                       self.lwidth[i], self.lflux[i])
            ###################################################################
            # CGF 05/12/16
            self.obj_units = 'photon/s/m2/micron'
            # mod.emline outputs in photon/s/m2/micron
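
    # Example emission-line request (hypothetical values): line_center='2.2',
    # line_fwhm='20', line_peakf='5' yields one line at 2.2 micron with
    # FWHM 20e-4 micron (the input is read as Angstrom) and total flux 5e-16
    # (cgs flux units are assumed).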
    def printXML(self, texp, signal_obj, signal_sky, ston, satur, **params):
        """
        A function to create the output XML files

        Updated to include more output by LRP 08-12-2016
        Mainly taken from Marc Balcells' version of the ETC

        Would this be quicker if we just had a few if statements and put
        the output for each case together -- it may also cause fewer errors!
        """
        output = ET.Element("output")
        if ff['operation'] == 'Photometry':
            fig_name = args[0] + "_photo.png"
        else:
            fig_name = args[0] + "_spec.png"
        ET.SubElement(output, "fig").text = fig_name
        ET.SubElement(output, "text").text = "SOURCE:"
        ET.SubElement(output, "text").text = "{0:s} Source (Vega Mag) = {1:.3f}".\
            format(ff['source_type'], self.mag)
        if ff['template'] == 'Model library':
            ET.SubElement(output, "text").text = "Template: Model library"
            ET.SubElement(output, "text").text = "Spectral Type: {0:s}".format(ff['model'])
        elif ff['template'] == 'Black body':
            ET.SubElement(output, "text").text = "Template: Black Body"
            ET.SubElement(output, "text").text = "Temperature = {0:.1f} K".format(float(ff['body_temp']))
        elif ff['template'] == 'Emission line':
            ET.SubElement(output, "text").text = "Template: Emission Line"
            ET.SubElement(output, "text").text = "Center = {0:s}, FWHM = {1:s}, Total line flux = {2:s}"\
                .format(ff['line_center'], ff['line_fwhm'], ff['line_peakf'])
        elif ff['template'] == 'Model file':
            ET.SubElement(output, "text").text = "Template: Model file"
            ET.SubElement(output, "text").text = "Model file = {0:s}".format(ff['model_file'])
        ET.SubElement(output, "text").text = "----------------------------------------------------------------"
        ET.SubElement(output, "text").text = "OBSERVATION:"
        ET.SubElement(output, "text").text = "Operation: {0:s}".format(ff['operation'])
        # Pick the exposure-time field that matches the requested operation
        exp_key = 'photo_exp_time' if ff['operation'] == 'Photometry' else 'spec_exp_time'
        ET.SubElement(output, "text").text = "Exposure time(s) = {0:s}".format(ff[exp_key])
        ET.SubElement(output, "text").text = "Number of exposures: Object {0:d}, Sky {1:d}".format(int(self.nobj), int(self.nsky))
        ET.SubElement(output, "text").text = "----------------------------------------------------------------"
        ET.SubElement(output, "text").text = "TELESCOPE AND INSTRUMENT:"
        if ff['operation'] == 'Photometry':
            ET.SubElement(output, "text").text = "Filter: {0:s} ".format(self.filtname)
        else:
            ET.SubElement(output, "text").text = "Grism: {0:s}".format(self.grismname)
            ET.SubElement(output, "text").text = "Slit width = {0:.2f} arcsec".format(self.slitwidth)
        ET.SubElement(output, "text").text = "Telescope collecting area = {0:.1f} m<sup>2</sup>".format(params['area'])
        # ET.SubElement(output, "text").text = "----------------------------------------------------------------"
        # ET.SubElement(output, "text").text = "Detector: "
        ET.SubElement(output, "text").text = "Spatial scale = {0:.4f} arcsec/pix ".format(params['scale'])
        ET.SubElement(output, "text").text = "Readout noise = {0:.1f} e<sup>-</sup> ".format(params['RON'])
        ET.SubElement(output, "text").text = "Dark current = {0:.2f} e<sup>-</sup>/hr".format(params['DC'])
        ET.SubElement(output, "text").text = "Well depth = {0:.1f} e<sup>-</sup>".format(params['well'])
        ET.SubElement(output, "text").text = "Gain = {0:.2f} e<sup>-</sup>/ADU".format(params['gain'])
        ET.SubElement(output, "text").text = "----------------------------------------------------------------"
        ET.SubElement(output, "text").text = "OBSERVING CONDITIONS:"
        ET.SubElement(output, "text").text = "Airmass = {0:.2f}".format(self.airmass)
        ET.SubElement(output, "text").text = "Seeing = {0:.2f} arcsec FWHM".format(self.seeing)
        ET.SubElement(output, "text").text = "Sky brightness = {0:.2f} Vega mag / arcsec<sup>2</sup>".format(self.mag_sky)
        ET.SubElement(output, "text").text = " "
        ET.SubElement(output, "text").text = "----------------------------------------------------------------"
        ET.SubElement(output, "text").text = "RESULTS:"
        tabletext = ""
        if ff['operation'] == 'Spectroscopy':
            ET.SubElement(output, "text").text = "Wavelength coverage: {0:.2f} - {1:.2f} μm".format(self.ldo_px[0], self.ldo_px[-1])
            ET.SubElement(output, "text").text = "Dispersion {0:.2f} Å/pix".format(self.dpx*1e4)
            # ET.SubElement(output, "text").text = "Resolution element {0:.2f} Å".format(self.cenwl*1e4/self.specres)
            ET.SubElement(output, "text").text = "Resolution element {0:.2f} Å".format(self.res_ele*1e4)
            ET.SubElement(output, "text").text = "In-slit fraction {0:.4f} ".format(self.slitloss)
            # Diagnostics:
            ET.SubElement(output, "text").text = "Nominal Spectral resolution {0:.4f} ".format(self.specres)
            ET.SubElement(output, "text").text = "Achieved Spectral resolution {0:.4f} ".format(self.cenwl/self.res_ele)
            # ET.SubElement(output, "text").text = "Central lambda {0:.4f} ".format(self.cenwl)
        if self.timerange != 'Range':
            ET.SubElement(output, "text").text = "For {0:d} exposure(s) of {1:.1f} s: ".format(int(self.nobj), texp[0])
            if ff['template'] == 'Emission line':
                ET.SubElement(output, "text").text = "Maximum counts from object {0:.1f}, median from sky: {1:.1f}".format(signal_obj[0], signal_sky[0])
                ET.SubElement(output, "text").text = "Maximum S/N = {0:.1f}".format(ston[0])
                ET.SubElement(output, "text").text = "Effective gain = {0:.2f} ".format(params['gain']*self.nobj)
                # ET.SubElement(output, "text").text = "For time {0:.1f} s the expected S/N is {1:.1f}".format(texp[0]*self.nobj,ston[0])
            else:
                ET.SubElement(output, "text").text = "Median counts per pixel: from object = {0:.1f}, from sky = {1:.1f}".format(signal_obj[0], signal_sky[0])
                ET.SubElement(output, "text").text = "Median S/N per pixel = {0:.1f}".format(ston[0])
                ET.SubElement(output, "text").text = "Effective gain = {0:.2f} ".format(params['gain']*self.nobj)
                ET.SubElement(output, "text").text = "For time {0:.1f} s the expected median S/N is {1:.1f}".format(texp[0]*self.nobj, ston[0])
            if satur:
                ET.SubElement(output, "warning").text = "for time {0:.1f} s some pixels are saturated".format(texp[0]*self.nobj)
        else:
            tabletext += "\n\tFor the selected time range, the expected S/N per pixel are:"
            tabletext += "\n\t t(s)\t S/N\tSaturation?"
            tabletext += "\n\t----------------------"
            if ff['operation'] == 'Photometry':
                for i in range(0, 99, 10):
                    flags = 'No'
                    if satur[i]:
                        flags = 'Yes'
                    tabletext += '\n\t{0:8.1f}\t{1:8.1f}\t'\
                        .format(texp[i]*self.nobj, ston[i]) + flags
                flags = 'No'
                if satur[-1]:
                    flags = 'Yes'
                tabletext += '\n\t{0:8.1f}\t{1:8.1f}\t'\
                    .format(texp[-1]*self.nobj, ston[-1]) + flags
            else:
                for i in range(0, 9):
                    flags = 'No'
                    if satur[i]:
                        flags = 'Yes'
                    tabletext += '\n\t{0:8.1f}\t{1:8.1f}\t'\
                        .format(texp[i]*self.nobj, ston[i]) + flags
            tabletext += "\n"
            ET.SubElement(output, "table").text = tabletext
        emir_guy.indent(output)
        tree = ET.ElementTree(output)
        tree.write(args[0] + "_out.xml")
try:
    EmirGui()
except SystemExit:
    pass
except:
    emir_guy.generic_error(args[0])
    exit()
| null |
etc_gui.py
|
etc_gui.py
|
py
| 36,461 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "optparse.OptionParser",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "etc_config.get_config",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "etc_classes.SpecCurve",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "etc_classes.SpecCurve",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "etc_classes.SpecCurve",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "etc_classes.SpecCurve",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "emir_guy.readxml",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "emir_guy.load",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "emir_guy.check_inputs",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "etc_modules.interpolatesky",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "etc_config.get_filter",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.savefig",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "etc_modules.slitpercent",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "etc_modules.interpolatesky",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "etc_config.get_grism",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.figure",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "numpy.median",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.xlim",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlim",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.grid",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlim",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlim",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.legend",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlim",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.legend",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.legend",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "numpy.zeros_like",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.figure",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.subplot",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.plot",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.xlabel",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.ylabel",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "matplotlib.pylab.savefig",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "etc_config.get_params",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "etc_config.get_skymag",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "etc_modules.vega",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "etc_modules.convolres",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "etc_config.get_skymag",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "etc_modules.vega",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "etc_config.get_skymag",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "etc_modules.vega",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "etc_modules.convolres",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "etc_modules.convolres",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "etc_modules.vega",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "etc_modules.convolres",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "etc_modules.getspread",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "etc_modules.checkforsaturation",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "etc_modules.checkforsaturation",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "etc_modules.convolres",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "etc_modules.spec_int",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "etc_config.get_params",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "etc_config.get_skymag",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "etc_modules.vega",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "etc_modules.vega",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "etc_modules.getspread",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "etc_modules.checkforsaturation",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "etc_modules.getnoise",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "etc_modules.checkforsaturation",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "etc_classes.SpecCurve",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "etc_modules.bbody",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "etc_classes.SpecCurve",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "etc_modules.emline",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 677,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 684,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 686,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 686,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 687,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 687,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 690,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 690,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 691,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 693,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 694,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 694,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 696,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 697,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 700,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 701,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 703,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 703,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 704,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 704,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 705,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 705,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 706,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 706,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 707,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 707,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 708,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 708,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 709,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 711,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 711,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 714,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 714,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 715,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 715,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 719,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 719,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 720,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 720,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 721,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 721,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 722,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 723,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 725,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 726,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 727,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 728,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 729,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 730,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 731,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 731,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 735,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 735,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 736,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 736,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 738,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 738,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 739,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 739,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 741,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 742,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 742,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 745,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 745,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 748,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 748,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 749,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 749,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 750,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 750,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 753,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 753,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 754,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 755,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 755,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 756,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 758,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 784,
"usage_type": "name"
},
{
"api_name": "emir_guy.indent",
"line_number": 785,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.ElementTree",
"line_number": 786,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 786,
"usage_type": "name"
},
{
"api_name": "emir_guy.generic_error",
"line_number": 794,
"usage_type": "call"
}
] |
187170802
|
import pygame
LABELS = ['Title', 'New Game', 'Continue', 'Achievements', 'Settings', 'Exit']
pygame.init()
w, h = 800, 500
screen = pygame.display.set_mode((w, h))
clock = pygame.time.Clock()
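# NOTE: clock is created but never ticked; calling clock.tick(fps) inside the loop would cap the frame rate.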
pygame.display.set_caption('Super Game')
font = pygame.font.Font(None, 25)
q = True
while q:
for event in pygame.event.get():
if event.type == pygame.QUIT:
q = False
screen.fill((0,0,0))
screen.blit(font.render(LABELS[0], 1, (255, 0, 0), (0, 0, 0)), (100, 100))
pygame.draw.rect(screen, (123, 0, 123), (90, 90, 130, 30), 1)
screen.blit(font.render(LABELS[1], 1, (255, 0, 0), (0, 0, 0)), (100, 150))
pygame.draw.rect(screen, (123, 0, 123), (90, 140, 130, 30), 1)
screen.blit(font.render(LABELS[2], 1, (255, 0, 0), (0, 0, 0)), (100, 200))
pygame.draw.rect(screen, (123, 0, 123), (90, 190, 130, 30), 1)
screen.blit(font.render(LABELS[3], 1, (255, 0, 0), (0, 0, 0)), (100, 250))
pygame.draw.rect(screen, (123, 0, 123), (90, 240, 130, 30), 1)
screen.blit(font.render(LABELS[4], 1, (255, 0, 0), (0, 0, 0)), (100, 300))
pygame.draw.rect(screen, (123, 0, 123), (90, 290, 130, 30), 1)
screen.blit(font.render(LABELS[5], 1, (255, 0, 0), (0, 0, 0)), (100, 350))
pygame.draw.rect(screen, (123, 0, 123), (90, 340, 130, 30), 1)
pygame.display.update()
pygame.quit()
| null |
Game/Game_Menu.py
|
Game_Menu.py
|
py
| 1,385 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 31,
"usage_type": "call"
}
] |
511306261
|
from django.urls import path
from login import views
urlpatterns = [
path('acceso/', views.acceso, name="acceso"),
path('acceso/listadousuarios/', views.listaUsuarios, name="listadousuarios"),
path('acceso/editarusuario/<str:id>/', views.editarusuario, name="editarusuario"),
path('acceso/actualizarusuario/', views.actualizarusuario, name="actualizarusuario"),
path('acceso/nuevousuario/', views.nuevousuario, name="nuevousuario"),
#path('setup/<str:id>/', views.setuprefrehs, name="setuprefrehs"),
]
| null |
login/urls.py
|
urls.py
|
py
| 533 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "login.views.acceso",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "login.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "login.views.listaUsuarios",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "login.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "login.views.editarusuario",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "login.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "login.views.actualizarusuario",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "login.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "login.views.nuevousuario",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "login.views",
"line_number": 10,
"usage_type": "name"
}
] |
127055147
|
from django.core.management import BaseCommand
from api.tasks import run_scrapers_nightly
class Command(BaseCommand):
help = 'Runs all of the scrapers. Good for setting up a development machine with new data.'
def handle(self, *args, **options):
self.stdout.write('Scraping Brigades and Projects from CfA website...')
run_scrapers_nightly() # running the nightly task pretty much covers it!
self.stdout.write('Finished scraping brigades and projects!')
| null |
api/management/commands/take_snapshot.py
|
take_snapshot.py
|
py
| 491 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.BaseCommand",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "api.tasks.run_scrapers_nightly",
"line_number": 11,
"usage_type": "call"
}
] |
501737759
|
import sqlalchemy
def getDiffOfModelAgainstDatabase(model, conn, excludeTables=None):
''' Return differences of model against database.
Returned object will evaluate to True if there are differences else False.
'''
return SchemaDiff(model, conn, excludeTables)
def getDiffOfModelAgainstModel(oldmodel, model, conn, excludeTables=None):
''' Return differences of model against another model (oldmodel).
Returned object will evaluate to True if there are differences else False.
'''
return SchemaDiff(model, conn, excludeTables, oldmodel=oldmodel)
class SchemaDiff(object):
''' Differences of model against database. '''
def __init__(self, model, conn, excludeTables=None, oldmodel=None):
''' Parameter model is your Python model's metadata and conn is an active database connection. '''
self.model = model
self.conn = conn
if not excludeTables: excludeTables = [] # [] can't be default value in Python parameter
self.excludeTables = excludeTables
if oldmodel:
self.reflected_model = oldmodel
else:
self.reflected_model = sqlalchemy.MetaData(conn, reflect=True)
self.tablesMissingInDatabase, self.tablesMissingInModel, self.tablesWithDiff = [], [], []
self.colDiffs = {}
self.compareModelToDatabase()
def compareModelToDatabase(self):
''' Do actual comparison. '''
# Setup common variables.
cc = self.conn.contextual_connect()
schemagenerator = self.conn.dialect.schemagenerator(self.conn.dialect, cc)
# For each in model, find missing in database.
for modelName, modelTable in self.model.tables.items():
if modelName in self.excludeTables:
continue
reflectedTable = self.reflected_model.tables.get(modelName, None)
if reflectedTable:
# Table exists.
pass
else:
self.tablesMissingInDatabase.append(modelTable)
# For each in database, find missing in model.
for reflectedName, reflectedTable in self.reflected_model.tables.items():
if reflectedName in self.excludeTables:
continue
modelTable = self.model.tables.get(reflectedName, None)
if modelTable:
# Table exists.
# Find missing columns in database.
for modelCol in modelTable.columns:
databaseCol = reflectedTable.columns.get(modelCol.name, None)
if databaseCol:
pass
else:
self.storeColumnMissingInDatabase(modelTable, modelCol)
# Find missing columns in model.
for databaseCol in reflectedTable.columns:
modelCol = modelTable.columns.get(databaseCol.name, None)
if modelCol:
# Compare attributes of column.
modelDecl = schemagenerator.get_column_specification(modelCol)
databaseDecl = schemagenerator.get_column_specification(databaseCol)
if modelDecl != databaseDecl:
# Unfortunately, sometimes the database decl won't quite match the model, even though they're the same.
mc, dc = modelCol.type.__class__, databaseCol.type.__class__
if (issubclass(mc, dc) or issubclass(dc, mc)) and modelCol.nullable == databaseCol.nullable:
# Types and nullable are the same.
pass
else:
self.storeColumnDiff(modelTable, modelCol, databaseCol, modelDecl, databaseDecl)
else:
self.storeColumnMissingInModel(modelTable, databaseCol)
else:
self.tablesMissingInModel.append(reflectedTable)
def __str__(self):
''' Summarize differences. '''
def colDiffDetails():
colout = []
for table in self.tablesWithDiff:
tableName = table.name
missingInDatabase, missingInModel, diffDecl = self.colDiffs[tableName]
if missingInDatabase:
colout.append(' %s missing columns in database: %s' % (tableName, ', '.join([col.name for col in missingInDatabase])))
if missingInModel:
colout.append(' %s missing columns in model: %s' % (tableName, ', '.join([col.name for col in missingInModel])))
if diffDecl:
colout.append(' %s with different declaration of columns in database: %s' % (tableName, str(diffDecl)))
return colout
out = []
if self.tablesMissingInDatabase:
out.append(' tables missing in database: %s' % ', '.join([table.name for table in self.tablesMissingInDatabase]))
if self.tablesMissingInModel:
out.append(' tables missing in model: %s' % ', '.join([table.name for table in self.tablesMissingInModel]))
if self.tablesWithDiff:
out.append(' tables with differences: %s' % ', '.join([table.name for table in self.tablesWithDiff]))
if out:
out.insert(0, 'Schema diffs:')
out.extend(colDiffDetails())
return '\n'.join(out)
else:
return 'No schema diffs'
#__repr__ = __str__
def __len__(self):
''' Used in bool evaluation, return of 0 means no diffs. '''
return len(self.tablesMissingInDatabase) + len(self.tablesMissingInModel) + len(self.tablesWithDiff)
def storeColumnMissingInDatabase(self, table, col):
if table not in self.tablesWithDiff:
self.tablesWithDiff.append(table)
missingInDatabase, missingInModel, diffDecl = self.colDiffs.setdefault(table.name, ([], [], []))
missingInDatabase.append(col)
def storeColumnMissingInModel(self, table, col):
if table not in self.tablesWithDiff:
self.tablesWithDiff.append(table)
missingInDatabase, missingInModel, diffDecl = self.colDiffs.setdefault(table.name, ([], [], []))
missingInModel.append(col)
def storeColumnDiff(self, table, modelCol, databaseCol, modelDecl, databaseDecl):
if table not in self.tablesWithDiff:
self.tablesWithDiff.append(table)
missingInDatabase, missingInModel, diffDecl = self.colDiffs.setdefault(table.name, ([], [], []))
diffDecl.append( (modelCol, databaseCol, modelDecl, databaseDecl) )
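# Example usage (sketch; assumes `metadata` is a model's MetaData and `engine`
# an active connection):
#     diff = getDiffOfModelAgainstDatabase(metadata, engine)
#     if diff:
#         print(diff)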
| null |
sqlalchemy-migrate/migrate/versioning/schemadiff.py
|
schemadiff.py
|
py
| 6,811 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlalchemy.MetaData",
"line_number": 30,
"usage_type": "call"
}
] |
572848560
|
import os
import pandas as pd
import numpy as np
import urllib.request
import io
import zipfile
import json
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data')
def load_zip_get_file(url, file, decoder='utf-8'):
"""
Load a zipfile from a URL and extract a single file. Note that this is
not ideal and may fail for large files since the files must fit in memory.
Parameters
----------
url: str
URL to read from.
file: str
Filename to pull out of the zipfile.
decoder: str
Usually None for raw bytes or 'utf-8', or 'latin1'
Returns
-------
file_buffer: io.BytesIO or io.StringIO
The file buffer for the requested file if decoder is None else return
a decoded StringIO.
"""
remotezip = urllib.request.urlopen(url)
zipinmemory = io.BytesIO(remotezip.read())
zf = zipfile.ZipFile(zipinmemory)
byte_string = zf.read(file)
if decoder:
string = byte_string.decode(decoder)
return io.StringIO(string)
else:
return io.BytesIO(byte_string)
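# Example (hypothetical URL and member name): pull a CSV out of a remote zip
# and read it with pandas:
#     buf = load_zip_get_file('https://example.com/archive.zip', 'table.csv')
#     df = pd.read_csv(buf)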
def cache_county_case_data():
"""
Cache county covid case data in #PYSEIR_HOME/data.
"""
print('Downloading covid case data')
# Previous datasets from coronadatascraper
# county_fips_map = pd.read_csv(os.path.join(DATA_DIR, 'county_state_fips.csv'), dtype='str', low_memory=False)
# case_data = pd.read_csv('https://coronadatascraper.com/timeseries-tidy.csv', low_memory=False)
#
# fips_merged = case_data.merge(county_fips_map, left_on=('county', 'state'), right_on=('COUNTYNAME', 'STATE'))\
# [['STCOUNTYFP', 'county', 'state', 'population', 'lat', 'long', 'date', 'type', 'value']]
#
# fips_merged.columns = [col.lower() for col in fips_merged.columns]
# NYT dataset
county_case_data = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype='str')
county_case_data['date'] = pd.to_datetime(county_case_data['date'])
county_case_data[['cases', 'deaths']] = county_case_data[['cases', 'deaths']].astype(int)
county_case_data = county_case_data[county_case_data['fips'].notnull()]
county_case_data.to_pickle(os.path.join(DATA_DIR, 'covid_case_timeseries.pkl'))
# def cache_county_metadata():
# """
# Cache 2019 census data including age distribution by state/county FIPS.
#
# # TODO Add pop density
# """
# print('Downloading county level population data')
# county_summary = pd.read_csv(
# 'https://www2.census.gov/programs-surveys/popest/datasets/2010-2018/counties/asrh/cc-est2018-alldata.csv',
# sep=',', encoding="ISO-8859-1", dtype='str', low_memory=False)
#
# df = county_summary[county_summary.YEAR == '11'][['STATE', 'COUNTY', 'CTYNAME', 'AGEGRP', 'TOT_POP']]
# df[['AGEGRP', 'TOT_POP']] = df[['AGEGRP', 'TOT_POP']].astype(int)
# list_agg = df.sort_values(['STATE', 'COUNTY', 'CTYNAME', 'AGEGRP']) \
# .groupby(['STATE', 'COUNTY', 'CTYNAME'])['TOT_POP'] \
# .apply(np.array) \
# .reset_index()
# list_agg['TOTAL'] = list_agg['TOT_POP'].apply(lambda x: x[0])
# list_agg['AGE_DISTRIBUTION'] = list_agg['TOT_POP'].apply(lambda x: x[1:])
# list_agg.drop('TOT_POP', axis=1)
#
# age_bins = list(range(0, 86, 5))
# age_bins += [120]
# list_agg['AGE_BIN_EDGES'] = [np.array(age_bins) for _ in
# range(len(list_agg))]
#
# list_agg.insert(0, 'fips', list_agg['STATE'] + list_agg['COUNTY'])
# list_agg = list_agg.drop(['COUNTY', 'TOT_POP'], axis=1)
# list_agg.columns = [col.lower() for col in list_agg.columns]
# list_agg = list_agg.rename(
# mapper={'ctyname': 'county_name', 'total': 'total_population'}, axis=1)
# list_agg.to_pickle(os.path.join(DATA_DIR, 'covid_county_metadata.pkl'))
def cache_hospital_beds():
"""
Pulled from "Definitive"
See: https://services7.arcgis.com/LXCny1HyhQCUSueu/arcgis/rest/services/Definitive_Healthcare_Hospitals_Beds_Hospitals_Only/FeatureServer/0
"""
print('Downloading ICU capacity data.')
url = 'http://opendata.arcgis.com/datasets/f3f76281647f4fbb8a0d20ef13b650ca_0.geojson'
tmp_file = urllib.request.urlretrieve(url)[0]
with open(tmp_file) as f:
vals = json.load(f)
df = pd.DataFrame([val['properties'] for val in vals['features']])
df.columns = [col.lower() for col in df.columns]
df = df.drop(['objectid', 'state_fips', 'cnty_fips'], axis=1)
df.to_pickle(os.path.join(DATA_DIR, 'icu_capacity.pkl'))
def cache_mobility_data():
"""
Pulled from https://github.com/descarteslabs/DL-COVID-19
"""
print('Downloading mobility data.')
url = 'https://raw.githubusercontent.com/descarteslabs/DL-COVID-19/master/DL-us-mobility-daterow.csv'
dtypes_mapping = {
'country_code': str,
'admin_level': int,
'admin1': str,
'admin2': str,
'fips': str,
'samples': int,
'm50': float,
'm50_index': float}
df = pd.read_csv(filepath_or_buffer=url, parse_dates=['date'], dtype=dtypes_mapping)
df__m50 = df.query('admin_level == 2')[['fips', 'date', 'm50']]
df__m50_index = df.query('admin_level == 2')[['fips', 'date', 'm50_index']]
df__m50__final = df__m50.groupby('fips').agg(list).reset_index()
df__m50_index__final = df__m50_index.groupby('fips').agg(list).reset_index()
df__m50__final['m50'] = df__m50__final['m50'].apply(lambda x: np.array(x))
df__m50_index__final['m50_index'] = df__m50_index__final['m50_index'].apply(lambda x: np.array(x))
df__m50__final.to_pickle(os.path.join(DATA_DIR, 'mobility_data__m50.pkl'))
df__m50_index__final.to_pickle(os.path.join(DATA_DIR, 'mobility_data__m50_index.pkl'))
def load_county_case_data():
"""
Return county level case data. The following columns:
Returns
-------
: pd.DataFrame
"""
return pd.read_pickle(os.path.join(DATA_DIR, 'covid_case_timeseries.pkl'))
def load_county_metadata():
"""
Return county level metadata such as age distributions, populations etc..
Returns
-------
: pd.DataFrame
"""
# return pd.read_pickle(os.path.join(DATA_DIR, 'covid_county_metadata.pkl'))
return pd.read_json('/Users/ecarlson/county_covid_seir_models/data/county_metadata.json')
def load_hospital_data():
"""
Return hospital level data. Note that this must be aggregated by stcountyfp
to obtain county level estimates.
Returns
-------
: pd.DataFrame
"""
return pd.read_pickle(os.path.join(DATA_DIR, 'icu_capacity.pkl'))
def load_mobility_data_m50():
"""
Return mobility data without normalization
Returns
-------
: pd.DataFrame
"""
return pd.read_pickle(os.path.join(DATA_DIR, 'mobility_data__m50.pkl'))
def load_mobility_data_m50_index():
"""
Return mobility data with normalization: per
https://github.com/descarteslabs/DL-COVID-19 normal m50 is defined during
2020-02-17 to 2020-03-07.
Returns
-------
: pd.DataFrame
"""
return pd.read_pickle(os.path.join(DATA_DIR, 'mobility_data__m50_index.pkl'))
def cache_all_data():
"""
Download all datasets locally.
"""
cache_county_case_data()
# cache_county_metadata()
cache_hospital_beds()
cache_mobility_data()
if __name__ == '__main__':
cache_all_data()
| null |
pyseir/load_data.py
|
load_data.py
|
py
| 7,445 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_pickle",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_json",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_pickle",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_pickle",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 202,
"usage_type": "attribute"
}
] |
73492358
|
import re
import urllib
import httplib2
from urllib3.exceptions import TimeoutError
from log import warning, stopped
# MOST OF THE CODE WAS CODE TAKEN FROM YOUTUBE-DL AND OR CHANGED TO FIT HERE. CREDIT TO THE AUTHORS OF YOUTUBE-DL.
# YOU WILL NEED PYTHON 3 FOR THIS.
def download_website(url, Headers=None, RequestMethod='GET'):
try:
from urllib.request import urlopen, Request
except ImportError:
urlopen = None
Request = None
stopped("Unsupported version of Python. You need Version 3 :<")
    request = Request(url, headers=Headers or {})  # Request rejects headers=None
try:
response = urlopen(request,
data=urllib.parse.urlencode({}).encode("utf-8") if 'POST' in RequestMethod else None)
except urllib.error.HTTPError as e1:
try:
if e1.code == 504:
return 504
return e1.read().decode('utf-8')
except AttributeError:
if 'CERTIFICATE_VERIFY_FAILED' in str(e1): # ERROR IN URLError.
return 2
return None
except (TimeoutError, OSError) as e:
if 'closed' in str(e): # ERROR IN OSError.
return 504
if 'CERTIFICATE_VERIFY_FAILED' in str(e): # ERROR IN URLError.
return 2
return None
except Exception as e4:
warning("Unable to request HTTP website.")
warning("Error: " + str(e4))
return None
try:
website_bytes = response.read()
except OSError as e4:
warning("Error: " + str(e4))
warning("Unable to read website bytes.")
return None
try:
decoded_bytes = website_bytes.decode('utf-8')
except Exception as e4:
warning("Error: " + str(e4))
warning("Unable to decode website bytes.")
return None
return decoded_bytes
def parse_json(json_string):
import json
try:
return json.loads(json_string)
except Exception:
return None
def download_json(url, Headers=None, RequestMethod='GET'):
res = download_website(url, Headers=Headers, RequestMethod=RequestMethod)
if type(res) is not str:
return res
json_string = res
return parse_json(json_string)
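# Example (hypothetical endpoint; many sites require a User-Agent header):
#     data = download_json('https://example.com/api',
#                          Headers={'User-Agent': 'Mozilla/5.0'})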
def stringToInt(string):
try:
okay = int(string)
return okay
except ValueError:
return None
| null |
CLIENT/PYTHON-FILES/utils.py
|
utils.py
|
py
| 2,331 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "urllib.request.Request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "log.stopped",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "urllib.error",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "urllib3.exceptions.TimeoutError",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "log.warning",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "log.warning",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "log.warning",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "log.warning",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "log.warning",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "log.warning",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 62,
"usage_type": "call"
}
] |
435063841
|
import os
import sys
from pygit2 import Repository
from controller.main_loop import MainLoop
from misc.events_dispatcher import EventsDispatcher
from misc.pubsub import PubSub
from config import *
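# NOTE: pygame is used throughout (pygame.init, pygame.display, pygame.quit)
# but is not imported in this module; it is assumed to be pulled in via
# 'from config import *'.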
from drawer.drawer import Drawer
class Main:
def __init__(self):
pubsub = PubSub()
events_dispatcher = EventsDispatcher(pubsub)
display = self.display_surface()
repo = Repository(config['repo_path'])
branches = repo.listall_branches()
drawer = Drawer(repo, branches, display, pubsub)
pubsub.sub('on_program_exit', self.exit_program)
self.main_loop = MainLoop(display, drawer, events_dispatcher)
self.init_screen()
def start(self):
self.log_start()
self.main_loop.start()
@staticmethod
def log_start():
if config['log_events']:
print('start ' + config['title'])
@staticmethod
def exit_program(obs):
if config['log_events']:
print('exit ' + config['title'])
while True:
try:
if obs:
obs.stop()
obs.join()
break
except AttributeError:
continue
pygame.quit()
sys.exit(0)
@staticmethod
def display_surface():
if config['full_screen']:
return pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
else:
return pygame.display.set_mode(config['resolution'], pygame.RESIZABLE)
def init_screen(self):
pygame.init()
self.set_centered_on_screen()
self.set_title()
@staticmethod
def set_centered_on_screen():
if config['centered_on_screen']:
os.environ['SDL_VIDEO_CENTERED'] = '1'
@staticmethod
def set_title():
pygame.display.set_caption(config['title'])
| null |
controller/main.py
|
main.py
|
py
| 1,862 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "misc.pubsub.PubSub",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "misc.events_dispatcher.EventsDispatcher",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygit2.Repository",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "drawer.drawer",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "drawer.drawer.Drawer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "controller.main_loop.MainLoop",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "drawer.drawer",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "sys.exit",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 67,
"usage_type": "attribute"
}
] |
67963023
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, blank=True, on_delete=models.CASCADE)
phone = models.IntegerField(null=True, blank=True)
designation = models.CharField(max_length=100, null=True, blank=True)
salary = models.IntegerField(null=True, blank=True)
profile_pic = models.ImageField(default='profile_pic_default.png', upload_to='', null=True, blank=True)
class Meta:
ordering = ('-salary',)
def __str__(self):
return "{0} - {1}".format(self.user.username, self.designation)
| null |
employees/models.py
|
models.py
|
py
| 645 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
}
] |
150421507
|
######################################
# Authors: Nick Baquer, Paco Frantzen, Maximilian Meijkamp, Dario van Wagensveld, Mustafa Wahid and Andreas Zafiropoulos
# TU Delft, March 2020, AE3212-II Simulation, Verification and Validation group A10
# Python Script. numericalmodel.py. retrieved from https://github.com/MaxMeijkamp/A10_SVV/blob/master/numericaltools.py
# on 15 March 2020, 15:34.
######################################
import numpy as np
from functools import partial
def spline(x, f, n):
# Spline function,
if len(x) != len(f):
raise ValueError("The lists are not of the same shape")
sp_start = []
sp_slope = []
for i in range(0, n-1):
splinestart = f[i]
splineslope = ((f[i+1] - f[i])/(x[i+1] - x[i]))
sp_start.append(splinestart)
sp_slope.append(splineslope)
# sp = np.vstack((np.array(sp_start), np.array(sp_slope)))
return sp_start, sp_slope # sp.T
def interpolate(x, f, x_target):
# Interpolation function which gives f_target for a given x_target and x-f spline
n = len(x)
x = np.asarray(x)
f = np.asarray(f)
if x[0] > x_target or x_target > x[n-1]:
raise ValueError("The target location is not in the range of provided function values, x_tar =", x_target,
"; x[0] =", x[0], "; x[n-1] =", x[n-1])
elif x_target in x:
return f[np.where(x_target==x)]
else:
sp_start, sp_slope = spline(x, f, n)
left_i = n-2
for i in range(n):
if x[i] > x_target:
left_i = i-1
break
f_target = sp_slope[left_i] * (x_target - x[left_i]) + sp_start[left_i]
return f_target
def cont_spline(x_discrete, f_discrete):
return np.vectorize(partial(interpolate, x_discrete, f_discrete))
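# Example: linear interpolant through (0, 0), (1, 1), (2, 4):
#     f = cont_spline([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
#     f([0.5, 1.5])  # -> array([0.5, 2.5])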
| null |
numerical_tools.py
|
numerical_tools.py
|
py
| 1,810 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.asarray",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.vectorize",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 51,
"usage_type": "call"
}
] |
452811035
|
from toolchain import Recipe, shprint
from os.path import join, exists
import sh
import os
import fnmatch
import shutil
class HostSetuptools3(Recipe):
depends = ["openssl", "hostpython3"]
archs = ["x86_64"]
url = "setuptools"
def prebuild_arch(self, arch):
hostpython = sh.Command(self.ctx.hostpython)
sh.curl("-O", "https://bootstrap.pypa.io/ez_setup.py")
shprint(hostpython, "./ez_setup.py")
# Extract setuptools egg and remove .pth files. Otherwise subsequent
# python package installations using setuptools will raise exceptions.
# Setuptools version 28.3.0
site_packages_path = join(
self.ctx.dist_dir, 'hostpython3',
'lib', 'python3.7', 'site-packages')
os.chdir(site_packages_path)
with open('setuptools.pth', 'r') as f:
setuptools_egg_path = f.read().strip('./').strip('\n')
print("setuptools_egg_path=", setuptools_egg_path)
unzip = sh.Command('unzip')
shprint(unzip, "-o", setuptools_egg_path)
os.remove(setuptools_egg_path)
os.remove('setuptools.pth')
os.remove('easy-install.pth')
shutil.rmtree('EGG-INFO')
recipe = HostSetuptools3()
| null |
kivy-ios/recipes/host_setuptools3/__init__.py
|
__init__.py
|
py
| 1,243 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "toolchain.Recipe",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sh.Command",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sh.curl",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "toolchain.shprint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sh.Command",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "toolchain.shprint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 33,
"usage_type": "call"
}
] |
546977789
|
"""
Write a function that merges integers from sorted files and returns an iterator
file1.txt:
1
3
5
file2.txt:
2
4
6
>>> list(merge_sorted_files(["file1.txt", "file2.txt"]))
[1, 2, 3, 4, 5, 6]
"""
from heapq import merge
from pathlib import Path
from typing import Iterator, List
def merge_sorted_files(file_list: List[str]) -> Iterator[int]:
    # heapq.merge performs a lazy k-way merge of already-sorted inputs; the
    # original pairwise interleave was only correct when the two files'
    # values happened to alternate strictly.
    sequences = [
        (int(line) for line in Path(name).read_text().splitlines())
        for name in file_list
    ]
    return merge(*sequences)
print(list(merge_sorted_files(["file1.txt", "file2.txt"])))
| null |
homework9/task1.py
|
task1.py
|
py
| 811 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.Iterator",
"line_number": 18,
"usage_type": "name"
}
] |
610779244
|
'''loading and interpreting game data'''
import sys
from os.path import normpath
from importlib import import_module
from rpyg.world import *
class Loader:
def __init__(self, path='.', package='data'):
self.path = path
self.package = package
self._data = None
def load(self):
sys.path.insert(0, normpath(self.path))
self._data = import_module(self.package)
def get_world(self, player):
return parse_world(self._data.world, player)
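# Typical use (sketch; 'Hero' is a placeholder player name):
#     loader = Loader()
#     loader.load()
#     world = loader.get_world('Hero')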
def parse_world(data, player_name):
place_player = player = Entity(player_name, data.player_symbol, False)
rooms = {}
for rm_name, rm_data in data.world.items():
key = rm_data['key']
room = {}
for z, layer in enumerate(rm_data['layers']):
for y, row in enumerate(layer):
for x, char in enumerate(row):
if char == data.ignore_symbol:
continue
elif char in key:
edata = key[char]._asdict()
edata['symbol'] = char
entity = Entity(**edata)
elif char == data.player_symbol:
entity = place_player
place_player = None
room[(x, y, z)] = entity
rooms[rm_name] = room
return World(player, rooms, data.starting_room, data.initial_record)
| null |
rpyg/data.py
|
data.py
|
py
| 1,428 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.insert",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.normpath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 18,
"usage_type": "call"
}
] |
571199195
|
# importing required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from sklearn.model_selection import TimeSeriesSplit
def test_stationarity(timeseries):
#Determing rolling statistics
rolmean = timeseries.rolling(12).mean()
rolstd = timeseries.rolling(12).std()
#Plot rolling statistics:
orig = plt.plot(timeseries, color='blue',label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
#Perform Dickey-Fuller test:
print('Results of Dickey-Fuller Test:')
dftest = adfuller(timeseries, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print(dfoutput)
# Now, we will load the data set and look at some initial rows and data types of the columns:
"""
data = pd.read_csv('testtop.csv')
print (data.head())
print ('\n Data Types:')
print (data.dtypes)
"""
# The original tutorial data was monthly airline-passenger counts; reading it as a time series needs special read_csv arguments (see the commented-out call below). The dataset actually loaded here is Sunspots.csv.
dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m')
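# NOTE: pd.datetime is deprecated (removed in pandas 2.0); datetime.datetime.strptime
# is the modern equivalent. dateparse is only used by the commented-out
# AirPassengers read below.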
#data = pd.read_csv('AirPassengers.csv', parse_dates=['Month'], index_col='Month',date_parser=dateparse)
data = pd.read_csv('Sunspots.csv', header=0, index_col=0)
X = data.values
splits = TimeSeriesSplit(n_splits=3)
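# TimeSeriesSplit yields expanding-window folds: each split trains on all
# earlier observations and tests on the block that immediately follows, so
# the temporal ordering of the series is never violated.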
plt.figure(1)
index = 1
for train_index, test_index in splits.split(X):
train = X[train_index]
test = X[test_index]
print('Observations: %d' % (len(train) + len(test)))
print('Training Observations: %d' % (len(train)))
print('Testing Observations: %d' % (len(test)))
plt.subplot(310 + index)
plt.plot(train)
plt.plot([None for i in train] + [x for x in test])
index += 1
plt.show()
"""
## NOTE: You can run remaining codes in this article as well, using this live coding window.
ts = data['#Passengers']
ts_log = np.log(ts)
ts_log_diff = ts_log - ts_log.shift()
moving_avg = ts_log.rolling(12).mean()
expwighted_avg = ts_log.ewm(halflife=12).mean()
#plt.plot(ts_log-ts_log.shift())
#plt.plot()
#plt.plot(moving_avg, color='red')
ts_log_moving_avg_diff = ts_log - expwighted_avg
ts_log_moving_avg_diff.head(12)
ts_log_moving_avg_diff.dropna(inplace=True)
#test_stationarity(ts_log_moving_avg_diff)
#plt.plot(ts_log_moving_avg_diff.head(12))
model = ARIMA(ts_log, order=(2, 1, 2))
results_ARIMA = model.fit(disp=-1)
forecast = results_ARIMA.forecast()[0]
#plt.plot(ts_log_diff)
#plt.plot(results_ARIMA.fittedvalues, color='red')
#plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff)**2))
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
predictions_ARIMA_log = pd.Series(ts_log.iloc[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.plot(ts)
plt.plot(predictions_ARIMA)
#plt.plot(forecast)
plt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))
results_ARIMA.plot_predict(1,264)
plt.show()
#test_stationarity(ts)
"""
| null |
WebSensor/project/testTS.py
|
testTS.py
|
py
| 3,704 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "statsmodels.tsa.stattools.adfuller",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.datetime.strptime",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.datetime",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.TimeSeriesSplit",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
}
] |
433313155
|
import time
import miio
import logging
import io
from datetime import timedelta
from .xiaomi_cloud_connector import XiaomiCloudConnector
from .const import *
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import config_validation as cv
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=5)
DEFAULT_TRIMS = {
CONF_LEFT: 0,
CONF_RIGHT: 0,
CONF_TOP: 0,
CONF_BOTTOM: 0
}
COLOR_SCHEMA = vol.Or(
vol.All(vol.Length(min=3, max=3), vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)),
vol.All(vol.Length(min=4, max=4), vol.ExactSequence((cv.byte, cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple))
)
PERCENT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_COUNTRY): vol.In(CONF_AVAILABLE_COUNTRIES),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_COLORS, default={}): vol.Schema({
vol.In(CONF_AVAILABLE_COLORS): COLOR_SCHEMA
}),
vol.Optional(CONF_ROOM_COLORS, default={}): vol.Schema({
cv.positive_int: COLOR_SCHEMA
}),
vol.Optional(CONF_DRAW, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_DRAWABLES)]),
vol.Optional(CONF_MAP_TRANSFORM, default={CONF_SCALE: 1, CONF_ROTATE: 0, CONF_TRIM: DEFAULT_TRIMS}): vol.Schema(
{
vol.Optional(CONF_SCALE, default=1): cv.positive_int,
vol.Optional(CONF_ROTATE, default=0): vol.In([0, 90, 180, 270]),
vol.Optional(CONF_TRIM, default=DEFAULT_TRIMS): vol.Schema({
vol.Optional(CONF_LEFT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_RIGHT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_TOP, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_BOTTOM, default=0): PERCENT_SCHEMA
}),
})
})
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
host = config[CONF_HOST]
token = config[CONF_TOKEN]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
country = config[CONF_COUNTRY]
name = config[CONF_NAME]
image_config = config[CONF_MAP_TRANSFORM]
colors = config[CONF_COLORS]
room_colors = config[CONF_ROOM_COLORS]
for room, color in room_colors.items():
colors[f"{COLOR_ROOM_PREFIX}{room}"] = color
drawables = config[CONF_DRAW]
if "all" in drawables:
drawables = CONF_AVAILABLE_DRAWABLES[1:]
async_add_entities(
[VacuumCamera(hass, host, token, username, password, country, name, image_config, colors, drawables)])
class VacuumCamera(Camera):
def __init__(self, hass, host, token, username, password, country, name, image_config, colors, drawables):
super().__init__()
self.hass = hass
self._vacuum = miio.Vacuum(host, token)
self._connector = XiaomiCloudConnector(username, password, country)
self._name = name
self._image_config = image_config
self._colors = colors
self._drawables = drawables
self._image = None
self._map_data = None
self._logged = False
@property
def frame_interval(self):
return 0.5
def camera_image(self):
return self._image
@property
def name(self):
return self._name
@property
def device_state_attributes(self):
if self._map_data is not None:
return {
ATTRIBUTE_CHARGER: self._map_data.charger,
ATTRIBUTE_IMAGE: self._map_data.image,
ATTRIBUTE_VACUUM_POSITION: self._map_data.vacuum_position,
ATTRIBUTE_PATH: self._map_data.path,
ATTRIBUTE_GOTO_PATH: self._map_data.goto_path,
ATTRIBUTE_GOTO_PREDICTED_PATH: self._map_data.predicted_path,
ATTRIBUTE_ZONES: self._map_data.zones,
ATTRIBUTE_GOTO: self._map_data.goto,
ATTRIBUTE_WALLS: self._map_data.walls,
ATTRIBUTE_NO_GO_AREAS: self._map_data.no_go_areas,
ATTRIBUTE_NO_MOPPING_AREAS: self._map_data.no_mopping_areas,
ATTRIBUTE_OBSTACLES: self._map_data.obstacles
}
return {}
@property
def should_poll(self):
return True
def update(self):
counter = 10
if not self._logged:
self._logged = self._connector.login()
map_name = "retry"
while map_name == "retry" and counter > 0:
time.sleep(0.1)
try:
map_name = self._vacuum.map()[0]
finally:
counter = counter - 1
if self._logged and map_name != "retry":
self._map_data = self._connector.get_map(map_name, self._colors, self._drawables, self._image_config)
if self._map_data is not None:
img_byte_arr = io.BytesIO()
self._map_data.image.data.save(img_byte_arr, format='PNG')
self._image = img_byte_arr.getvalue()
return
_LOGGER.warning("Unable to retrieve map data")
| null |
custom_components/xiaomi_cloud_map_extractor/camera.py
|
camera.py
|
py
| 5,583 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "voluptuous.Or",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "voluptuous.Length",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "voluptuous.ExactSequence",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.byte",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "voluptuous.Coerce",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "voluptuous.Length",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "voluptuous.ExactSequence",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.byte",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "voluptuous.Coerce",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "voluptuous.Coerce",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "voluptuous.Range",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.camera.PLATFORM_SCHEMA",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.camera.PLATFORM_SCHEMA.extend",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "voluptuous.Required",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_HOST",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Required",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_TOKEN",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Required",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_USERNAME",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Required",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_PASSWORD",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Required",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_NAME",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "voluptuous.Optional",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.string",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "voluptuous.All",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "voluptuous.Length",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.string",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "homeassistant.helpers.config_validation.string",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "voluptuous.In",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.string",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "voluptuous.Schema",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "voluptuous.In",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "voluptuous.Schema",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.positive_int",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "voluptuous.All",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.ensure_list",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "voluptuous.In",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "voluptuous.Schema",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_validation.positive_int",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_validation",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "voluptuous.In",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "voluptuous.Schema",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "voluptuous.Optional",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_HOST",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.CONF_TOKEN",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.CONF_USERNAME",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.CONF_PASSWORD",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.CONF_NAME",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.camera.Camera",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "miio.Vacuum",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "xiaomi_cloud_connector.XiaomiCloudConnector",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 142,
"usage_type": "call"
}
] |
419030188
|
import logging
import discord
from discord import RawReactionActionEvent
from discord.ext import commands
from cogs.utilities import credential_checks
from model.model import *
log = logging.getLogger(__name__)
async def _get_roles_from_iterable(iterable, guild: discord.Guild):
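# Shared helper: resolves each alias/reaction row to the guild role it
# grants, and expands any RoleOverwrite rows into roles to strip at the
# same time. Roles that were deleted from the guild resolve to None and
# are skipped.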
roles_to_provide = []
roles_to_remove = []
for alias in iterable:
role = guild.get_role(alias.role_id)
if role is None:
continue
roles_to_provide.append(role)
overwrites = RoleOverwrite.select().where(RoleOverwrite.role_id == alias.role_id)
for overwrite in overwrites:
role = guild.get_role(overwrite.overwrite_role_id)
if role is None:
continue
roles_to_remove.append(role)
return roles_to_provide, roles_to_remove
def _format_welcome_message(message: str, member: discord.Member):
return message.format(member.mention, member.display_name, member.guild.name)
async def _send_disappearing_notification(member: discord.Member, channel: discord.TextChannel, roles, prefix: str):
# Since join doesn't want to play fair.
formatted_roles = []
for role in roles:
formatted_roles.append(str(role))
# Send a disappearing message letting them know we've given them the roles.
msg = prefix + "`{}` for that flair!"
alert = await channel.send(msg.format(member, ", ".join(formatted_roles)))
await alert.delete(delay=10)
class Admin(commands.Cog):
"""Moderation related commands."""
def __init__(self, client: commands.Bot):
self.client = client
client.add_listener(self._on_member_join, "on_member_join")
client.add_listener(self._on_message, "on_message")
@commands.command()
async def invite(self, ctx):
""" Get a URL to invite the bot to your own server! """
await ctx.send(discord.utils.oauth_url(self.client.client_id))
@commands.command(no_pm=True, )
@credential_checks.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.User, delete_message_days: int = 0, reason: str = ""):
"""Bans a member from the server. You can provide a user either by their ID or mentioning them.
In order to do this, the bot and you must have Ban Member permissions.
"""
try:
await ctx.guild.ban(member, delete_message_days=delete_message_days, reason=reason)
except discord.Forbidden:
await ctx.send("The bot does not have permissions to ban members.")
except discord.HTTPException:
await ctx.send("Banning failed. I think it was my fault.. try again?")
else:
await ctx.send("BOOM! Banned " + member.name)
@commands.command(no_pm=True, )
@credential_checks.has_permissions(ban_members=True)
async def massban(self, ctx, *, users: str):
"""
Finds and bans people based on a list of user IDs.
In order to do this, the bot and you must have Ban Member permissions.
Separate identifiers with a space..
"""
ids = users.split(" ")
for id in ids:
# Do the least expensive check..
user = self.client.get_user(id)
if user:
await self.ban(ctx, user)
continue
# If we must, do the expensive API check.
try:
user = await self.client.fetch_user(id)
await self.ban(ctx, user)
except discord.NotFound:
await ctx.send("Could not find user by ID {}".format(id))
@commands.command(no_pm=True, )
@credential_checks.has_permissions(ban_members=True)
async def unban(self, ctx, member: discord.User):
"""Unbans a member from the server. You can provide a user either by their ID or mentioning them.
In order to do this, the bot and you must have Ban Member permissions.
"""
try:
await ctx.guild.unban(member)
except discord.Forbidden:
await ctx.send("I don't have permission!.")
except discord.HTTPException:
await ctx.send("Something went wrong. I think it was my fault.. try again?")
else:
await ctx.send("Ok! Unbanned " + member.name)
@commands.command(no_pm=True, )
@credential_checks.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member):
"""Kicks a member from the server.
In order to do this, the bot and you must have Kick Member permissions.
"""
try:
await member.guild.kick(member)
except discord.Forbidden:
await ctx.send("The bot does not have permissions to kick members.")
except discord.HTTPException:
await ctx.send("Kicking failed.")
else:
await ctx.send("BOOM. Kicked " + member.name)
@commands.command(no_pm=True, )
@credential_checks.has_permissions(manage_roles=True)
async def unflaired(self, ctx):
""" Counts the total number of people without flairs in the server.
You must have "Manage Roles" in order to run this command."""
unflaired_users = []
for member in ctx.message.guild.members:
if len(member.roles) == 1:
unflaired_users.append(member)
plural = "people" if len(unflaired_users) > 1 else "person"
await ctx.send("I found " + str(len(unflaired_users)) + " " + plural + " without a role in this server.\n")
@commands.command(no_pm=True, )
@credential_checks.has_permissions(ban_members=True)
async def softban(self, ctx, user: discord.User, delete_message_days: int = 0, reason: str = ""):
"""Bans and unbans a member from the server. You can provide a user either by their ID or mentioning them.
In order to do this, the bot and you must have Ban Member permissions.
This should be used in order to kick a member from the server whilst also
deleting all the messages that they have sent.
"""
try:
await ctx.guild.ban(user, delete_message_days=delete_message_days, reason=reason)
await ctx.guild.unban(user)
except discord.Forbidden:
await ctx.send("I don't have permission to do this.")
except discord.HTTPException:
await ctx.send("Something went wrong. Seems like a problem on my end. Try again?")
else:
await ctx.send("Softbanned {.name}. Their messages should be gone now.".format(user))
@commands.command(no_pm=True, aliases=['rolecommand'])
@credential_checks.has_permissions(manage_roles=True)
async def addrole(self, ctx, role: discord.Role):
"""Adds a role to the bot so that it can either be self assigned by a user or given by an admin.
If you have roles with the same name the last one will be chosen.
You must have the "Manage Roles" privilege in order to use this command."""
await ctx.send("Ok! What command should be used to assign this role?")
alias = await self.client.wait_for(
"message",
check=lambda m: m.author == ctx.message.author and m.channel == ctx.message.channel)
alias = alias.content.lower().strip()
RoleAlias.create(alias=alias, role_id=role.id, server_id=ctx.message.guild.id, is_admin_only=False, uses=0)
embed = discord.Embed(description="Ok! Added!", title="Role Command Added", color=discord.Colour.dark_green())
embed.add_field(name="Role", value=role.name)
embed.add_field(name="Command", value=alias)
await ctx.send(embed=embed)
await self.client.process_commands(ctx.message)
@commands.command(aliases=["wm"])
@credential_checks.has_permissions(manage_guild=True)
async def welcomemessage(self, ctx, *, message):
"""
Creates an automatic welcome message for the bot to say when a new user joins. Use <@> for user mention,
<> for user name, and <s> for the server name!
You will be asked a follow up question for what channel the welcome message should be said in.
You must have manage server permissions to use this command.
"""
message_to_store = message.replace("<@>", "{0}").replace("<>", "{1}").replace("<s>", "{2}")
await ctx.send("What channel should that message be posted in?")
channel = await self.client.wait_for(
"message",
check=lambda m: m.author == ctx.message.author and m.channel == ctx.channel)
try:
channel = channel.channel_mentions[0]
except IndexError:  # channel_mentions is a list; an empty mention list raises IndexError
await ctx.send("Oops! You must mention a channel!")
return
WelcomeMessage.create(message=message_to_store, server_id=ctx.guild.id, channel_id=channel.id)
example = message_to_store.format(ctx.message.author.mention, ctx.message.author.display_name,
ctx.message.guild.name)
embed = discord.Embed(title="Welcome Message Added!", description="Here's an example: " + example,
colour=discord.Colour.dark_green())
embed.add_field(name="Channel", value=channel.mention)
await ctx.send(embed=embed)
@commands.command(aliases=["rmwm", "deletewelcome","removewm"])
@credential_checks.has_permissions(manage_guild=True)
async def removewelcomemessage(self, ctx):
"""
Allows the user to delete a welcome message from the guild.
You must have "Manage Guild" permissions to do this.
"""
messages = WelcomeMessage.get_for_guild(ctx.guild.id)
if len(messages) == 1:
messages[0].delete_instance()
await ctx.send("Your welcome message has been removed!")
return
elif len(messages) == 0:
await ctx.send("Your guild has no welcome messages!")
return
embed = discord.Embed(title="Pick welcome message to delete",
description="Reply with the number of the message you are trying to delete.",
colour=discord.Colour.dark_green())
index = 1
indexed_messages = {}
for message in messages:
preview = "In <#{.channel_id}>: {.message}".format(message, message)
preview = _format_welcome_message(preview, ctx.author)
embed.add_field(name="Message #{}".format(index),
value=preview)
indexed_messages[index] = message
index += 1
await ctx.send(embed=embed)
reply = await self.client.wait_for(
"message", check=lambda m: m.author == ctx.message.author and m.channel == ctx.channel)
chosen_index = reply.content.strip().replace("#", "")
if not chosen_index.isnumeric():
await ctx.send("I'm not sure what you just tried to delete.. Run the command again?")
return
else:
chosen_index = int(chosen_index)
if chosen_index in indexed_messages:
indexed_messages[chosen_index].delete_instance()
await ctx.send("Message has been removed!")
else:
await ctx.send("I'm not sure what you just tried to delete.. Run the command again?")
@commands.command(no_pm=True, aliases=["roleoverwrite", "rolerule"])
@credential_checks.has_permissions(manage_roles=True)
async def overwrite(self, ctx, *, role: discord.Role):
"""When a role has been assigned a command, any overwrite will remove that role when the command is used.
If you have roles with the same name the last one will be chosen.
You must have the "Manage Roles" privilege in order to use this command."""
guild = ctx.message.guild
await ctx.send(
"Reply with the names of the roles you want to find here. If you want to overwrite more than one, "
"separate them with a comma.")
choices = await self.client.wait_for(
"message", check=lambda m: m.author == ctx.message.author and m.channel == ctx.message.channel)
choices = choices.content.split(",")
for role_name in choices:
try:
chosen_role = await commands.RoleConverter().convert(ctx, role_name.strip())
except commands.BadArgument:
await ctx.send("The role " + role_name + " could not be found and was not added.")
continue
RoleOverwrite.create(role_id=role.id, overwrite_role_id=chosen_role.id, server_id=guild.id)
await ctx.send("Done! Roles will be overwritten when they use the command.")
@commands.command(no_pm=True, hidden=True, )
@credential_checks.has_permissions(manage_roles=True)
async def roleinfo(self, ctx, *, role: discord.Role):
"""Get information on a role.
If you have roles with the same name the last one will be chosen.
You must have the "Manage Roles" privilege in order to use this command."""
guild = ctx.message.guild
aliases = RoleAlias.select(RoleAlias.alias, RoleAlias.uses) \
.where((RoleAlias.server_id == ctx.guild.id) & (RoleAlias.role_id == role.id))
overwrites = RoleOverwrite.select(RoleOverwrite.overwrite_role_id) \
.where((RoleOverwrite.server_id == ctx.guild.id) & (RoleOverwrite.role_id == role.id))
total_users = 0
for member in guild.members:
if role in member.roles:
total_users = total_users + 1
embed = discord.Embed(title=role.name, colour=role.colour,
description="Role information for \"" + role.name + "\"")
embed.add_field(name="Members", value=total_users)
embed.add_field(name="Can be mentioned?", value="Yes" if role.mentionable else "No")
embed.add_field(name="Created", value=role.created_at.strftime("%d of %b (%Y) %H:%M:%S"))
for alias in aliases:
embed.add_field(name="Command Name", value=alias.alias)
embed.add_field(name="Command Uses", value=alias.uses)
formatted_overwrites = []
for overwrite in overwrites:
formatted_overwrites.append(discord.utils.get(ctx.guild.roles, id=overwrite.overwrite_role_id).name)
if formatted_overwrites:
embed.add_field(name="This command overwrites", value=", ".join(formatted_overwrites))
await ctx.send(embed=embed)
@commands.command(no_pm=True, hidden=True)
@credential_checks.has_permissions(manage_messages=True)
async def purge(self, ctx, number_of_messages: int, channel: discord.TextChannel = None):
"""Delete a number of messages from the channel you type it in!
Messages cannot be purged if they are older than 14 days.
You must have manage messages permission to use this command."""
if channel is None:
channel = ctx.channel
try:
await channel.purge(limit=number_of_messages + 1)
except discord.Forbidden:
await ctx.send("I don't have permission to do this!")
except discord.HTTPException:
await ctx.send("I was unable to purge these messages. Are any of them older than 14 days?")
async def _on_message(self, message: discord.Message):
if not message.content.startswith(self.client.command_prefix):
return
space_location = message.content.find(" ")
if space_location == -1:
command = message.content[1:]
else:
command = message.content[1:space_location]
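# Strip the command prefix and take everything up to the first space as
# the candidate role command, then match it against this guild's
# registered aliases via peewee's % (LIKE) operator.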
aliases = RoleAlias.select().where((RoleAlias.server_id == message.guild.id) & (RoleAlias.alias % command))  # peewee requires & (bitwise AND), not Python's `and`, to build a compound expression
roles_to_provide, roles_to_remove = await _get_roles_from_iterable(aliases, message.guild)
await message.author.add_roles(*roles_to_provide, reason="Added using the \"{}\" command.".format(command))
await message.author.remove_roles(*roles_to_remove, reason="Removed using the \"{}\" command.".format(command))
if len(roles_to_provide) > 0:
await message.delete(delay=2)
async def _on_member_join(self, member: discord.Member):
welcome_messages = WelcomeMessage.get_for_guild(member.guild.id)
for message in welcome_messages:
channel = self.client.get_channel(message.channel_id)
await channel.send(_format_welcome_message(message.message, member))
class FlairMessage(commands.Cog, name="Reaction Flairs"):
def __init__(self, client: commands.Bot):
self.client = client
client.add_listener(self._on_reaction, "on_raw_reaction_add")
client.add_listener(self._on_reaction_removed, "on_raw_reaction_remove")
@commands.command(aliases=["rfinfo"])
@commands.has_permissions(manage_roles=True)
async def reactionflairinfo(self, ctx, message: discord.Message):
""" Shows information on all reaction flairs against a given message."""
flairs = FlairMessageReactionModel.select().where(FlairMessageReactionModel.discord_message_id == message.id)
embeds = []
for flair in flairs:
desc = "[This message]({.jump_url}) is a flair message.\n"
role = message.guild.get_role(flair.role_id)
emoji = self.client.get_emoji(flair.emoji_id)  # was fetching a role by mistake; the embed's "Emoji" field needs the reaction emoji
embed = discord.Embed(
title="Role flair {}".format(flair.reference),
description=desc.format(message),
colour=role.colour
)
embed.add_field(name="Reference", value=flair.reference)
embed.add_field(name="Emoji", value=emoji.name)
embed.add_field(name="Message ID", value=message.id)
embed.add_field(name="Role", value=role.name)
embed.set_footer(
text="This may also remove roles when the flair is used. Check using the \"roleinfo\" command.")
embeds.append(embed)
await ctx.send(embed=embed)
if len(embeds) < 1:
await ctx.send("There are no reaction flairs for this message.")
return
# Todo send multiple embeds when this is supported.
# await ctx.send(embed=embed)
@commands.command(aliases=["rmrf"])
@commands.has_permissions(manage_roles=True)
async def removereactionflair(self, ctx, reference):
""" Deletes a reaction flair from the system.
You can only delete a reaction flair using its reference created when the reaction flair was set up.
If you've forgotten it, use the "rfinfo" command. """
try:
model = FlairMessageReactionModel.get_by_id(reference)
except DoesNotExist:
await ctx.send("Could not find a reaction flair by the reference `{}`. Try the \"rfinfo\" command."
.format(reference))
return
model.delete_instance()
await ctx.send("Done! Removed that reaction flair.")
@commands.command(aliases=["rf"])
@commands.has_permissions(manage_roles=True)
async def reactionflair(self, ctx, message: discord.Message, emoji: discord.Emoji, role: discord.Role):
""" Adds a new flair reaction to a message.
This will give a role to the person who reacts to the message with this emoji.
This also respects the "overwrite" command.
If a user uses this reaction, it will remove any roles that have been configured with that command."""
await message.add_reaction(emoji)
desc = "[This message]({.jump_url}) has been set up as a flair message.\n"
model = FlairMessageReactionModel.create(reference=FlairMessageReactionModel.generate_unique_reference(),
discord_message_id=message.id,
emoji_id=emoji.id,
role_id=role.id)
embed = discord.Embed(
title="Role flair added!",
description=desc.format(message),
colour=role.colour
)
embed.add_field(name="Reference", value=model.reference)
embed.add_field(name="Emoji", value=emoji.name)
embed.add_field(name="Message ID", value=message.id)
embed.add_field(name="Role", value=role.name)
embed.set_footer(
text="This may also remove roles when the flair is used. Check using the \"roleinfo\" command.")
await ctx.send(embed=embed)
async def _on_reaction_removed(self, reaction: RawReactionActionEvent):
reactions = FlairMessageReactionModel.select().where(
(FlairMessageReactionModel.discord_message_id == reaction.message_id) &
(FlairMessageReactionModel.emoji_id == reaction.emoji.id))
channel = self.client.get_channel(reaction.channel_id)
guild = channel.guild
member = channel.guild.get_member(reaction.user_id)
roles_to_remove = []
for to_remove in reactions:
role = guild.get_role(to_remove.role_id)
if role is None:
continue
roles_to_remove.append(role)
if 0 == len(roles_to_remove):
return
await member.remove_roles(*roles_to_remove,
reason="Removed using the \"{}\" reaction.".format(reaction.emoji.name))
await _send_disappearing_notification(
member, channel, roles_to_remove, "{.mention}, I have removed the role(s) ")
async def _on_reaction(self, reaction: RawReactionActionEvent):
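# Raw reaction events fire even for messages missing from the client's
# message cache, so the handler resolves the channel, guild and member
# from the payload IDs rather than receiving full model objects.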
reactions = FlairMessageReactionModel.select().where(
(FlairMessageReactionModel.discord_message_id == reaction.message_id) &
(FlairMessageReactionModel.emoji_id == reaction.emoji.id))
channel = self.client.get_channel(reaction.channel_id)
roles_to_provide, roles_to_remove = await _get_roles_from_iterable(reactions, channel.guild)
member = channel.guild.get_member(reaction.user_id)
emoji_name = reaction.emoji.name
if 0 == len(roles_to_provide):
return
await member.add_roles(*roles_to_provide, reason="Added using the \"{}\" reaction.".format(emoji_name))
await member.remove_roles(*roles_to_remove, reason="Removed using the \"{}\" reaction.".format(emoji_name))
await _send_disappearing_notification(
member, channel, roles_to_provide, "{.mention}, I have given you the role(s) ")
def setup(client):
client.add_cog(Admin(client))
client.add_cog(FlairMessage(client))
| null |
cogs/admin.py
|
admin.py
|
py
| 23,231 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.Guild",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "discord.TextChannel",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "discord.utils.oauth_url",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "discord.Forbidden",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "discord.NotFound",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "discord.Forbidden",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "discord.Forbidden",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "discord.Forbidden",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "discord.Role",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "discord.Colour.dark_green",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "discord.Colour",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "discord.Colour.dark_green",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "discord.Colour",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "discord.Colour.dark_green",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "discord.Colour",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "discord.Role",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.RoleConverter",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.BadArgument",
"line_number": 303,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "discord.Role",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "discord.utils.get",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "discord.Forbidden",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "cogs.utilities.credential_checks.has_permissions",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "cogs.utilities.credential_checks",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "discord.Message",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 396,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "discord.Message",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "model.model",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "model.model.delete_instance",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "model.model",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "discord.Message",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "discord.Emoji",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "discord.Role",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "model.model",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "model.model.reference",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "model.model",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "discord.RawReactionActionEvent",
"line_number": 486,
"usage_type": "name"
},
{
"api_name": "discord.RawReactionActionEvent",
"line_number": 513,
"usage_type": "name"
}
] |
560996084
|
from os import path, makedirs
from io import open
from json import load, dump
from .classes import TranslationDictionary
from multiprocessing.managers import BaseManager
dicFolder = "translations"
def getDictionaries (manager):
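# Wraps each per-context dictionary in a manager.dict() proxy so separate
# worker processes can share and mutate the translations concurrently.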
mainDict = {}
for c in TranslationDictionary.contextNames:
dictionary = getDic(c)
sharedDictionary = manager.dict()
sharedDictionary.update(dictionary)
mainDict[c] = sharedDictionary
return mainDict
def saveDictionaries (mainDict):
for key in TranslationDictionary.contextNames:
simplerInsideDic = {}
simplerInsideDic.update(mainDict[key])
saveDic(simplerInsideDic, key)
def getDic (filename):
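# Machine and human translation files are merged in order, so entries in
# the *Human.json file override machine translations with the same key.
# Each file is first read as plain UTF-8 and, if that fails (e.g. because
# of a BOM written by Windows tools), re-read as utf-8-sig. (The encoding
# kwarg to json.load is a Python 2 leftover; Python 3 ignores it and 3.9
# removed it.)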
dic = {}
try:
if filename != "0Level":
if path.isfile(path.join(dicFolder, (filename + "Machine.json"))):
try:
with open(path.join(dicFolder, (filename + "Machine.json")), "r", encoding="utf-8") as f:
dic.update(load(f, encoding="utf-8"))
except Exception as e:
with open(path.join(dicFolder, (filename + "Machine.json")), "r", encoding="utf-8-sig") as f:
dic.update(load(f, encoding="utf-8-sig"))
if path.isfile(path.join(dicFolder, (filename + "Human.json"))):
try:
with open(path.join(dicFolder, (filename + "Human.json")), "r", encoding="utf-8") as f:
dic.update(load(f, encoding="utf-8"))
except Exception as e:
with open(path.join(dicFolder, (filename + "Human.json")), "r", encoding="utf-8-sig") as f:
dic.update(load(f, encoding="utf-8-sig"))
else:
if path.isfile(path.join(dicFolder, (filename + ".json"))):
try:
with open(path.join(dicFolder, (filename + ".json")), "r", encoding="utf-8") as f:
dic.update(load(f, encoding="utf-8"))
except Exception as e:
with open(path.join(dicFolder, (filename + ".json")), "r", encoding="utf-8-sig") as f:
dic.update(load(f, encoding="utf-8-sig"))
return dic
except Exception as e:
print(str(e) + ": Error loading old translations. Will create new file. Stop now if that's not wanted.")
return {}
def saveDic (dic, filename):
if not path.exists(dicFolder):
makedirs(dicFolder)
machine = {}
personal = {}
for key in dic:
if key != "open":
if not TranslationDictionary.isMachineTranslation(dic[key]):
personal[key] = dic[key]
elif TranslationDictionary.isCurrentMachineTranslation(dic[key]):
machine[key] = dic[key]
if filename != "0Level":
with open(path.join(dicFolder, (filename + "Machine.json")), "w", encoding="utf-8") as f:
dump(machine, f, indent=4, sort_keys=True, ensure_ascii=False)
with open(path.join(dicFolder, (filename + "Human.json")), "w", encoding="utf-8") as f:
dump(personal, f, indent=4, sort_keys=True, ensure_ascii=False)
| null |
lib/dictionaryLoader.py
|
dictionaryLoader.py
|
py
| 3,165 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "classes.TranslationDictionary.contextNames",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "classes.TranslationDictionary",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "classes.TranslationDictionary.contextNames",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "classes.TranslationDictionary",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "classes.TranslationDictionary.isMachineTranslation",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "classes.TranslationDictionary",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "classes.TranslationDictionary.isCurrentMachineTranslation",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "classes.TranslationDictionary",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "io.open",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 70,
"usage_type": "call"
}
] |
140150669
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from django.conf import settings as django_settings
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.formats import date_format
from django.utils.safestring import mark_safe
from django.utils.translation import string_concat
from magi import settings
from magi.item_model import getInfoFromChoices
from magi.magicollections import (
MagiCollection,
AccountCollection as _AccountCollection,
PrizeCollection as _PrizeCollection,
DonateCollection as _DonateCollection,
ActivityCollection as _ActivityCollection,
)
from magi.utils import (
staticImageURL,
CuteFormType,
CuteFormTransform,
custom_item_template,
torfc2822,
setSubField,
jsv,
mergedFieldCuteForm,
FAVORITE_CHARACTERS_IMAGES,
)
from sukutomo import forms, models
from sukutomo.utils import (
sortIdolUnit,
)
############################################################
# Prize Collection
class PrizeCollection(_PrizeCollection):
enabled = True
############################################################
# Donate Collection
class DonateCollection(_DonateCollection):
enabled = True
############################################################
# Account Collection
class AccountCollection(_AccountCollection):
form_class = forms.AccountForm
navbar_link_list = 'community'
filter_cuteform = {
'accept_friend_requests': {
'type': CuteFormType.YesNo,
},
'i_play_with': {
'to_cuteform': lambda k, v: models.Account.PLAY_WITH[models.Account.get_reverse_i('play_with', k)]['icon'],
'transform': CuteFormTransform.FlaticonWithText,
},
'i_version': {
'to_cuteform': lambda k, v: models.VERSIONS[models.Account.get_reverse_i('version', k)]['image'],
'image_folder': 'language',
'transform': CuteFormTransform.ImagePath,
},
'i_os': {
'transform': CuteFormTransform.FlaticonWithText,
},
}
############################################################
# Activity Collection
class ActivityCollection(_ActivityCollection):
class ListView(_ActivityCollection.ListView):
def extra_context(self, context):
super(ActivityCollection.ListView, self).extra_context(context)
context['game_name'] = _('School Idol Festival')
context['t_site_name'] = _('School Idol Tomodachi')
context['site_description'] = _(u'The {game} Database & Community').format(game=_('Love Live!'))
############################################################
# Idols Collection
IDOLS_ICONS = {
'name': 'id',
'japanese_name': 'id',
'school': 'school',
'year': 'education',
'age': 'scoreup',
'birthday': 'birthday',
'height': 'measurements',
'blood': 'hp',
'bust': 'measurements',
'waist': 'measurements',
'hips': 'measurements',
'color': 'palette',
'hobbies': 'hobbies',
'favorite_food': 'food-like',
'least_favorite_food': 'food-dislike',
'description': 'author',
}
IDOL_ORDER = [
'image', 'name', 'japanese_name', 'attribute', 'unit', 'subunit', 'school',
'year', 'astrological_sign', 'birthday', 'age', 'blood', 'measurements', 'color',
'hobbies', 'favorite_food', 'least_favorite_food', 'description',
]
IDOLS_CUTEFORM = {
'i_unit': {
},
'i_subunit': {
},
'sub_unit': {
'to_cuteform': lambda k, v: (
staticImageURL(k, folder='i_unit', extension='png') if float(k) - 2 < 0 else
staticImageURL(int(k) - 2, folder='i_subunit', extension='png')
),
'title': _('Unit'),
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
'i_attribute': {
},
'i_year': {
'type': CuteFormType.HTML,
},
'i_astrological_sign': {
},
'i_blood': {
'type': CuteFormType.HTML,
},
}
mergedFieldCuteForm(IDOLS_CUTEFORM, {
'title': string_concat(_('Unit'), ' / ', _('Subunit')),
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
}, OrderedDict ([
('i_unit', lambda k, v: models.Idol.unitImage(i=int(k))),
('i_subunit', lambda k, v: models.Idol.subUnitImage(i=int(k))),
]))
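# The merged cuteform above presents i_unit and i_subunit as one
# "Unit / Subunit" selector; in `sub_unit`'s to_cuteform, keys below 2 map
# to the two main units and anything above is offset by 2 into the
# subunit images, so the two key ranges don't collide.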
class IdolCollection(MagiCollection):
queryset = models.Idol.objects.all()
title = _('Idol')
plural_title = _('Idols')
multipart = True
form_class = forms.IdolForm
reportable = False
blockable = False
translated_fields = ('name', 'hobbies', 'favorite_food', 'least_favorite_food', 'description', )
icon = 'idol'
navbar_link_list = 'lovelive'
filter_cuteform = IDOLS_CUTEFORM
def to_fields(self, view, item, *args, **kwargs):
fields = super(IdolCollection, self).to_fields(view, item, *args, icons=IDOLS_ICONS, images={
'attribute': staticImageURL(item.i_attribute, folder='i_attribute', extension='png'),
'unit': staticImageURL(item.i_unit, folder='i_unit', extension='png'),
'subunit': staticImageURL(item.i_subunit, folder='i_subunit', extension='png'),
'astrological_sign': staticImageURL(
item.i_astrological_sign,
folder='i_astrological_sign',
extension='png',
),
}, **kwargs)
if item.japanese_name and get_language() == 'ja':
setSubField(fields, 'name', key='value', value=item.japanese_name)
setSubField(fields, 'birthday', key='type', value='text')
setSubField(fields, 'birthday', key='value', value=lambda f: date_format(item.birthday, format='MONTH_DAY_FORMAT', use_l10n=True))
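        # MONTH_DAY_FORMAT renders the month and day only; the year is not shown
        # (age is kept as a separate field).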
return fields
class ItemView(MagiCollection.ItemView):
def to_fields(self, item, order=None, extra_fields=None, exclude_fields=None, *args, **kwargs):
if extra_fields is None: extra_fields = []
if exclude_fields is None: exclude_fields = []
if order is None: order = []
values = []
for fieldName, verbose_name in models.Idol.MEASUREMENT_DETAILS:
value = getattr(item, fieldName)
exclude_fields.append(fieldName)
if value:
values.append(mark_safe(u'<b>{}</b>: {} cm'.format(verbose_name, value)))
if values:
extra_fields.append(('measurements', {
'verbose_name': _('Measurements'),
'type': 'list',
'value': values,
'icon': 'measurements',
}))
if item.school is not None:
exclude_fields.append('i_year')
if item.birthday is not None:
exclude_fields += ['age', 'i_astrological_sign']
exclude_fields.append('japanese_name')
order = IDOL_ORDER + order
fields = super(IdolCollection.ItemView, self).to_fields(item, *args, order=order, extra_fields=extra_fields, exclude_fields=exclude_fields, **kwargs)
if item.birthday:
if item.astrological_sign is not None:
setSubField(fields, 'birthday', key='icon', value=None)
setSubField(fields, 'birthday', key='image', value=staticImageURL(item.i_astrological_sign, folder='i_astrological_sign', extension='png'))
if item.age:
setSubField(fields, 'birthday', key='type', value='text_annotation')
setSubField(fields, 'birthday', key='annotation', value=_('{age} years old').format(age=item.age))
if item.school and item.year:
setSubField(fields, 'school', key='type', value='title_text')
setSubField(fields, 'school', key='title', value=item.t_school)
                setSubField(fields, 'school', key='value', value=unicode(item.t_year))
setSubField(fields, 'description', key='type', value='long_text')
if item.japanese_name:
if get_language() == 'ja':
setSubField(fields, 'name', key='value', value=item.japanese_name)
else:
setSubField(fields, 'name', key='type', value='text_annotation')
setSubField(fields, 'name', key='annotation', value=item.japanese_name)
return fields
class ListView(MagiCollection.ListView):
filter_form = forms.IdolFilterForm
item_template = custom_item_template
per_line = 9
page_size = 36
default_ordering = 'unit'
def get_queryset(self, queryset, parameters, request):
queryset = super(IdolCollection.ListView, self).get_queryset(queryset, parameters, request)
if request.GET.get('ordering', 'unit') == 'unit':
queryset = sortIdolUnit(queryset)
return queryset
def extra_context(self, context):
super(IdolCollection.ListView, self).extra_context(context)
            if any(k != 'page' for k in context['request'].GET.keys()):
context['per_line'] = 4
context['col_size'] = 3
class AddView(MagiCollection.AddView):
staff_required = True
permissions_required = ['manage_main_items']
class EditView(MagiCollection.EditView):
staff_required = True
permissions_required = ['manage_main_items']
allow_delete = True
############################################################
# SIFEvents Collection
EVENT_FIELDS_PER_VERSION = ['image', 'countdown', 'start_date', 'end_date']
EVENT_ITEM_FIELDS_ORDER = [
'banner', 'title', 'type', 'unit',
] + [
u'{}{}'.format(_v['prefix'], _f) for _v in models.VERSIONS.values()
for _f in EVENT_FIELDS_PER_VERSION
]
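# With the version prefixes this expands to e.g. 'jp_image', 'jp_countdown',
# 'jp_start_date', 'jp_end_date', then the same four fields for 'ww_', 'tw_',
# 'kr_' and 'cn_'.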
EVENTS_ICONS = {
'title': 'id',
'jp_start_date': 'date', 'jp_end_date': 'date',
'ww_start_date': 'date', 'ww_end_date': 'date',
'tw_start_date': 'date', 'tw_end_date': 'date',
'kr_start_date': 'date', 'kr_end_date': 'date',
'cn_start_date': 'date', 'cn_end_date': 'date',
'type': 'toggler',
}
class SIFEventCollection(MagiCollection):
queryset = models.SIFEvent.objects.all()
title = _('Event')
plural_title = _('Events')
multipart = True
form_class = forms.SIFEventForm
reportable = False
blockable = False
translated_fields = ('title', )
icon = 'event'
navbar_link_list = 'games'
_version_images = { _k: _v['image'] for _k, _v in models.VERSIONS.items() }
_version_prefixes = { _k: _v['prefix'] for _k, _v in models.VERSIONS.items() }
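    # Per-version flag image and field prefix ('jp_', 'ww_', ...) lookups,
    # used by the 'version' cuteform below and exposed to JS by the list view.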
filter_cuteform = {
'i_unit': {
},
'version': {
'to_cuteform': lambda k, v: SIFEventCollection._version_images[k],
'image_folder': 'language',
'transform': CuteFormTransform.ImagePath,
},
}
def to_fields(self, view, item, *args, **kwargs):
fields = super(SIFEventCollection, self).to_fields(view, item, *args, icons=EVENTS_ICONS, images={
'unit': staticImageURL(item.i_unit, folder='i_unit', extension='png'),
}, **kwargs)
setSubField(fields, 'jp_start_date', key='timezones', value=['Asia/Tokyo', 'Local time'])
setSubField(fields, 'jp_end_date', key='timezones', value=['Asia/Tokyo', 'Local time'])
setSubField(fields, 'ww_start_date', key='timezones', value=['UTC', 'Local time'])
setSubField(fields, 'ww_end_date', key='timezones', value=['UTC', 'Local time'])
setSubField(fields, 'tw_start_date', key='timezones', value=['Asia/Taipei', 'Local time'])
setSubField(fields, 'tw_end_date', key='timezones', value=['Asia/Taipei', 'Local time'])
setSubField(fields, 'kr_start_date', key='timezones', value=['Asia/Seoul', 'Local time'])
setSubField(fields, 'kr_end_date', key='timezones', value=['Asia/Seoul', 'Local time'])
setSubField(fields, 'cn_start_date', key='timezones', value=['UTC', 'Local time'])
setSubField(fields, 'cn_end_date', key='timezones', value=['UTC', 'Local time'])
return fields
class ItemView(MagiCollection.ItemView):
def to_fields(self, item, order=None, extra_fields=None, exclude_fields=None, *args, **kwargs):
if extra_fields is None: extra_fields = []
if exclude_fields is None: exclude_fields = []
if order is None: order = []
exclude_fields.append('c_versions')
for version, version_details in models.VERSIONS.items():
# Create countdown if event is upcoming/ongoing
status = getattr(item, u'{}status'.format(version_details['prefix']))
if status and status != 'ended':
start_date = getattr(item, u'{}start_date'.format(version_details['prefix']))
end_date = getattr(item, u'{}end_date'.format(version_details['prefix']))
extra_fields += [
(u'{}countdown'.format(version_details['prefix']), {
'verbose_name': string_concat(version_details['translation'], ' - ', _('Countdown')),
'value': mark_safe(u'<span class="fontx1-5 countdown" data-date="{date}" data-format="{sentence}"></h4>').format(
date=torfc2822(end_date if status == 'current' else start_date),
sentence=_('{time} left') if status == 'current' else _('Starts in {time}'),
),
'icon': 'hourglass',
'type': 'html',
}),
]
exclude_fields.append('start_date')
exclude_fields.append('end_date')
order = EVENT_ITEM_FIELDS_ORDER + order
fields = super(SIFEventCollection.ItemView, self).to_fields(
item, *args, order=order, extra_fields=extra_fields, exclude_fields=exclude_fields, **kwargs)
return fields
class ListView(MagiCollection.ListView):
filter_form = forms.SIFEventFilterForm
per_line = 2
default_ordering = '-jp_start_date'
def _modification_extra_context(self, context):
if 'js_variables' not in context:
context['js_variables'] = {}
context['js_variables']['version_prefixes'] = jsv(self._version_prefixes)
context['js_variables']['fields_per_version'] = jsv(EVENT_FIELDS_PER_VERSION)
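            # Presumably consumed by the 'loadVersions' JS callback on the
            # add/edit views to show or hide the per-version form fields.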
class AddView(MagiCollection.AddView):
staff_required = True
permissions_required = ['manage_main_items']
ajax_callback = 'loadVersions'
def extra_context(self, context):
super(SIFEventCollection.AddView, self).extra_context(context)
self.collection._modification_extra_context(context)
class EditView(MagiCollection.EditView):
staff_required = True
permissions_required = ['manage_main_items']
ajax_callback = 'loadVersions'
allow_delete = True
def extra_context(self, context):
super(SIFEventCollection.EditView, self).extra_context(context)
self.collection._modification_extra_context(context)
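# Hedged sketch (not part of the original module): the countdown fields above
# emit markup that is filled in client-side, e.g. for an ongoing JP event:
#
#   <span class="fontx1-5 countdown"
#         data-date="Tue, 01 Jan 2019 00:00:00 +0000"
#         data-format="{time} left"></span>
#
# where data-date comes from torfc2822() and data-format is the translated sentence.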
############################################################
# Songs Collection
SONG_FIELDS_PER_DIFFICULTY = ['notes', 'difficulty']
SONGS_ICONS = {
    'title': 'id', 'romaji': 'id', 'versions': 'world', 'locations': 'world',
    'unlock': 'unlock', 'b_side_start': 'date',
    'b_side_end': 'date', 'release': 'date', 'itunes_id': 'play',
    'length': 'times', 'bpm': 'hp', 'master_swipe': 'index',
    # 'daily' appeared twice ('toggler' and 'trade'); the later value is kept.
    'hits': 'deck', 'daily': 'trade', 'b-side': 'times',
}
class SongCollection(MagiCollection):
queryset = models.Song.objects.all()
title = _('Song')
plural_title = _('Songs')
multipart = True
form_class = forms.SongForm
reportable = False
blockable = False
translated_fields = ('title', )
icon = 'song'
navbar_link_list = 'lovelive'
_version_images = { _k: _v['image'] for _k, _v in models.Account.VERSIONS.items() }
_version_prefixes = { _k: _v['prefix'] for _k, _v in models.Account.VERSIONS.items() }
_location_to_cuteform = {
'hits': 'deck',
'daily': 'trade',
'bside': 'times',
}
filter_cuteform = {
'i_attribute': {
},
'i_unit': {
},
'i_subunit': {
'image_folder': 'i_subunit',
'title': _('Subunit'),
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
'version': {
'to_cuteform': lambda k, v: SongCollection._version_images[k],
'image_folder': 'language',
'transform': CuteFormTransform.ImagePath,
},
'available': {
'type': CuteFormType.YesNo,
},
'location': {
'transform': CuteFormTransform.Flaticon,
'to_cuteform': lambda k, v: SongCollection._location_to_cuteform[k],
},
'sub_unit': {
'to_cuteform': lambda k, v: (
staticImageURL(k, folder='i_unit', extension='png') if float(k) - 2 < 0 else
staticImageURL(int(k) - 2, folder='i_subunit', extension='png')
),
'title': _('Unit'),
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
}
def to_fields(self, view, item, *args, **kwargs):
fields = super(SongCollection, self).to_fields(view, item, *args, icons=SONGS_ICONS, images={
'attribute': staticImageURL(item.i_attribute, folder='i_attribute', extension='png'),
'unit': staticImageURL(item.i_unit, folder='i_unit', extension='png'),
'subunit': staticImageURL(item.i_subunit, folder='i_subunit', extension='png'),
}, **kwargs)
setSubField(fields, 'b_side_start', key='timezones', value=['Asia/Tokyo', 'Local time'])
setSubField(fields, 'b_side_end', key='timezones', value=['Asia/Tokyo', 'Local time'])
setSubField(fields, 'release', key='timezones', value=['Asia/Tokyo', 'Local time'])
setSubField(fields, 'length', key='value', value=lambda f: item.length_in_minutes)
return fields
class ItemView(MagiCollection.ItemView):
top_illustration = 'include/topSongItem'
def to_fields(self, item, order=None, extra_fields=None, exclude_fields=None, *args, **kwargs):
if extra_fields is None: extra_fields = []
if exclude_fields is None: exclude_fields = []
if order is None: order = []
values = u' '
for fieldName, verbose_name in models.Song.SONGWRITERS:
value = getattr(item, fieldName)
exclude_fields.append(fieldName)
if value:
values+=u'<b>{}:</b> {}<br />'.format(verbose_name, value)
            if values != u' ':
extra_fields.append(('songwriters', {
'verbose_name': _('Songwriters'),
'type': 'html',
'value': mark_safe(u'<div class="songwriters-details">{}</div>'.format(values)),
'icon': 'id',
}))
status = getattr(item, 'status')
if status and status != 'ended':
start_date = getattr(item, 'b_side_start')
end_date = getattr(item, 'b_side_end')
            if item.b_side_master:
verbose = string_concat(_('B-Side'), ' - ', _('Countdown'), ' (MASTER)')
else:
verbose = string_concat(_('B-Side'), ' - ', _('Countdown'))
extra_fields += [
('countdown', {
'verbose_name': verbose,
'value': mark_safe(u'<span class="fontx1-5 countdown" data-date="{date}" data-format="{sentence}"></h4>').format(
date=torfc2822(end_date if status == 'current' else start_date),
sentence=_('{time} left') if status == 'current' else _('Starts in {time}'),
),
'icon': 'hourglass',
'type': 'html',
}),
]
else:
exclude_fields.append('b_side_start')
exclude_fields.append('b_side_end')
            av_value = bool(getattr(item, 'available'))
extra_fields += [
('availability', {
'verbose_name': _('Currently available'),
'value': av_value,
'icon': 'help',
'type': 'bool',
}),
]
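            # Build one combined HTML field per difficulty, e.g.
            # '9 ☆ rating<br />450 notes', and exclude the raw per-difficulty
            # columns from the default field list.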
for difficulty, d_verbose in models.Song.DIFFICULTIES:
difficulties = u' '
difficultystar = u'{}_difficulty'.format(difficulty)
difficultynote = u'{}_notes'.format(difficulty)
difficultystars = getattr(item, difficultystar)
difficultynotes = getattr(item, difficultynote)
temps = u'{} ☆ rating'.format(difficultystars)
tempn = u'{} notes'.format(difficultynotes)
if difficultystars:
if difficultynotes:
difficulties += u'{}<br />{}'.format(temps, tempn)
                        if difficulty == 'master' and item.master_swipe:
difficulties += u'<br />{}'.format(_('with SWIPE notes'))
else:
difficulties += u'{}'.format(temps)
elif difficultynotes:
difficulties += u'{}'.format(tempn)
                    if difficulty == 'master' and item.master_swipe:
difficulties += u'<br />{}'.format(_('with SWIPE notes'))
                if difficulties != u' ':
extra_fields.append((difficulty, {
'verbose_name': d_verbose,
'type': 'html',
'value': difficulties,
}))
exclude_fields.append(difficulty)
exclude_fields.append(difficultynote)
exclude_fields.append(difficultystar)
exclude_fields.append('title')
exclude_fields.append('romaji')
exclude_fields.append('cover')
exclude_fields.append('i_attribute')
exclude_fields.append('i_unit')
exclude_fields.append('i_subunit')
exclude_fields.append('c_locations')
exclude_fields.append('b_side_master')
exclude_fields.append('master_swipe')
order = ['itunes_id', 'length', 'bpm', 'c_versions', 'availability', 'unlock',
'daily', 'countdown', 'b_side_start', 'b_side_end', 'easy', 'normal', 'hard', 'expert',
'master', 'songwriters', 'release'] + order
fields = super(SongCollection.ItemView, self).to_fields(
item, *args, order=order, extra_fields=extra_fields, exclude_fields=exclude_fields, **kwargs)
if item.romaji and item.romaji != item.title:
setSubField(fields, 'title', key='type', value='title_text')
setSubField(fields, 'title', key='title', value=item.title)
setSubField(fields, 'title', key='value', value=item.romaji)
setSubField(fields, 'unlock', key='type', value='text')
setSubField(fields, 'unlock', key='value', value=u'Rank {}'.format(item.unlock))
setSubField(fields, 'easy', key='image', value=staticImageURL('easy', folder='difficulty', extension='png'))
setSubField(fields, 'normal', key='image', value=staticImageURL('normal', folder='difficulty', extension='png'))
setSubField(fields, 'hard', key='image', value=staticImageURL('hard', folder='difficulty', extension='png'))
setSubField(fields, 'expert', key='image', value=staticImageURL('expert', folder='difficulty', extension='png'))
setSubField(fields, 'master', key='image', value=staticImageURL('master', folder='difficulty', extension='png'))
return fields
def _modification_extra_context(self, context):
if 'js_variables' not in context:
context['js_variables'] = {}
context['js_variables']['version_prefixes'] = jsv(self._version_prefixes)
class ListView(MagiCollection.ListView):
filter_form = forms.SongFilterForm
item_template = custom_item_template
per_line = 4
default_ordering = '-release'
ajax_pagination_callback = 'loadSongs'
def extra_context(self, context):
super(SongCollection.ListView, self).extra_context(context)
self.collection._modification_extra_context(context)
class AddView(MagiCollection.AddView):
staff_required = True
permissions_required = ['manage_main_items']
ajax_callback = 'loadSongs'
def extra_context(self, context):
super(SongCollection.AddView, self).extra_context(context)
self.collection._modification_extra_context(context)
class EditView(MagiCollection.EditView):
staff_required = True
permissions_required = ['manage_main_items']
ajax_callback = 'loadSongs'
allow_delete = True
def extra_context(self, context):
super(SongCollection.EditView, self).extra_context(context)
self.collection._modification_extra_context(context)
############################################################
# Card Collection
CARD_AUTO_EXCLUDE = [
'limited', 'promo', 'support', 'rate', 'i_dependency', 'chance',
'number', 'length', 'i_center', 'i_group', 'boost_percent',
'smile_min', 'pure_min', 'cool_min', 'hp', 'i_skill_type',
] + models.Card.IDOLIZED_FIELDS + models.Card.UNIDOLIZED_FIELDS
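# Fields hidden from the card views by default; most are re-surfaced below as
# composed fields (skill and leader-skill sentences, image lists, max stats).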
def _idol_sub_unit_to_cuteform(k):
if float(k) - 2 < 0:
return staticImageURL(k, folder='i_unit', extension='png')
elif float(k) - 10 < 0:
return staticImageURL(int(k) - 2, folder='i_subunit', extension='png')
else:
return FAVORITE_CHARACTERS_IMAGES[int(k) - 10]
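# Assumed key layout, based on the branches above: 0-1 map to unit images,
# 2-9 to subunit images (offset by 2), and 10+ to FAVORITE_CHARACTERS_IMAGES
# (offset by 10).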
CARDS_CUTEFORM = {
'i_unit': {
},
'i_subunit': {
'image_folder': 'i_subunit',
'title': _('Subunit'),
},
'idol_sub_unit': {
'to_cuteform': lambda k, v: _idol_sub_unit_to_cuteform(k),
'title': _('Unit'),
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
'i_attribute': {
},
'i_year': {
'type': CuteFormType.HTML,
},
'i_rarity': {
'image_folder': 'rarity_3',
'title': _('Rarity'),
},
'version': {
        'to_cuteform': lambda k, v: SIFEventCollection._version_images[k],
'image_folder': 'language',
'transform': CuteFormTransform.ImagePath,
},
'idol': {
'to_cuteform': lambda k, v: FAVORITE_CHARACTERS_IMAGES[k],
'title': _('Idol'),
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
'skill': {
'transform': CuteFormTransform.FlaticonWithText,
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
'i_skill_type': {
'transform': CuteFormTransform.Flaticon,
'to_cuteform': lambda _k, _v: SKILL_TYPE_ICONS[models.Skill.get_reverse_i('skill_type', _k)],
},
'in_set': {
'transform': CuteFormTransform.FlaticonWithText,
'extra_settings': {
'modal': 'true',
'modal-text': 'true',
},
},
'i_group': {
'transform': CuteFormTransform.Flaticon,
'to_cuteform': lambda k, v: CardCollection._center_boost_to_cuteform[models.Card.get_reverse_i('group', k)]
},
'card_type': {
'to_cuteform': lambda k, v: CardCollection._card_type_to_cuteform[k],
'transform': CuteFormTransform.Flaticon,
}
}
CARDS_ICONS = {
'name': 'id', 'card_id': 'id', 'versions': 'world', 'release': 'date',
'images': 'pictures', 'icons': 'pictures', 'transparents': 'pictures',
'art': 'pictures', 'hp': 'hp', 'details': 'author',
}
# A list (not a set) so the image sections keep a stable display order.
CARD_IMAGES = [
    ('image', _('Images')),
    ('icon', _('Icons')),
    ('transparent', _('Transparents')),
    ('art', _('Art')),
]
CARD_ORDER = [
'card_id', 'name', 'idol_details', 'i_rarity', 'i_attribute',
'c_versions', 'release', 'set', 'main_skill', 'leader_skill',
'icons', 'images', 'arts', 'transparents', 'details'
]
class CardCollection(MagiCollection):
queryset = models.Card.objects.all()
title = _('Card')
plural_title = _('Cards')
multipart = True
form_class = forms.CardForm
reportable = False
blockable = False
translated_fields = ('name', 'details')
icon = 'deck'
navbar_link_list = 'schoolidolfestival'
_center_boost_to_cuteform = {
'unit':'circles-grid',
'subunit':'share',
'year':'education',
}
_card_type_to_cuteform = {
'perm':'chest',
'limited':'hourglass',
'promo':'promo',
}
filter_cuteform = CARDS_CUTEFORM
def to_fields(self, view, item, *args, **kwargs):
        # Add/Edit views show the all-attribute rarity symbols by default;
        # Item/List views show the attribute-specific rarity once the attribute is known.
        if item.attribute:
            rarityfolder = 'rarity_' + str(item.i_attribute)
        else:
            rarityfolder = 'rarity_3'
fields = super(CardCollection, self).to_fields(view, item, *args, icons=CARDS_ICONS, images={
'attribute': staticImageURL(item.i_attribute, folder='i_attribute', extension='png'),
'rarity': staticImageURL(item.i_rarity, folder=rarityfolder, extension='png'),
}, **kwargs)
setSubField(fields, 'release', key='timezones', value=['Asia/Tokyo'])
setSubField(fields, 'card_id', key='type', value='text')
setSubField(fields, 'card_id', key='value', value=u'#{}'.format(item.card_id))
return fields
class ItemView(MagiCollection.ItemView):
top_illustration = 'items/cardItem'
ajax_callback = 'loadCard'
def to_fields(self, item, order=None, extra_fields=None, exclude_fields=None, *args, **kwargs):
if extra_fields is None: extra_fields = []
if exclude_fields is None: exclude_fields = []
if order is None: order = []
#Add ID Field
extra_fields.append(('id', {
'verbose_name': _(u'ID'),
'type': 'text',
'value': u'#{}'.format(item.id),
'icon': 'id',
}))
# Add Idol field
if item.idol:
extra_fields.append(('idol_details', {
'verbose_name': _('Idol'),
'type': 'html',
'value': string_concat('<a href="', item.idol.item_url, '"data-ajax-url="', item.idol.ajax_item_url,
'"data-ajax-title="', item.idol, '">', item.idol.t_name, '<img class="idol-small-image" src="',
item.idol.image_url,'"></img></a>'),
'icon': 'idol',
}))
# Add skill
if item.skill:
# Get list of variables to parse through
SKILL_REPLACE = models.Card.SKILL_REPLACE
if item.idol:
SKILL_REPLACE += models.Card.IDOL_REPLACE
skill_details = getattr(item, 'skill_details')
                # Substitute each placeholder with the item's value, or '???' when unknown
for variable in SKILL_REPLACE:
if variable in models.Card.IDOL_REPLACE:
_item = item.idol
else:
_item = item
if getattr(_item, variable, None) is not None:
                        # If translatable, use the translated value instead
if variable in models.Card.SKILL_REPLACE_TRANSLATE:
var = getattr(_item, 't_{}'.format(variable))
else:
var = getattr(_item, variable)
else:
var = '???'
                    var_re = u'{' + variable + u'}'
                    skill_details = skill_details.replace(var_re, unicode(var))
            # Create the skill field
            skill_sentence = _('{} (Level 1)').format(skill_details)
extra_fields.append(('main_skill', {
'verbose_name': _('Skill'),
'type': 'html',
'value': skill_sentence,
'icon': 'sparkle',
}))
# Add center skill
if item.center:
leader_skill = getattr(item, 'center_details')
leader_skill = leader_skill.format(getattr(item, 't_attribute'))
if item.rarity in ['UR', 'SSR']:
leader_second = _('plus {group} members\' {} pts. up by {}%')
                if item.group != 0 and item.boost_percent is not None:
for gvariable, ggvariable in models.Card.GROUP_CHOICES:
                        if ggvariable == item.t_group:
                            if item.t_group != _('Unit'):
og = leader_second
var = getattr(item.idol, 't_' + gvariable)
leader_second = og.replace('{group}', var)
else:
og = leader_second
leader_second = og.replace('{group}', item.idol.unit)
leader_second = leader_second.format(item.t_attribute, item.boost_percent)
extra_fields.append(('leader_skill', {
'verbose_name': _('Leader Skill'),
'type': 'text',
'value': leader_skill,
'icon': 'center',
}))
# Add set
if item.in_set:
extra_fields.append(('set', {
'verbose_name': _('Set'),
'type': 'link',
'ajax_link': item.in_set.ajax_cards_url,
'link': item.in_set.cards_url,
'link_text': unicode(item.in_set),
'icon': 'scout-box',
}))
# Add images
for image, verbose_name in CARD_IMAGES:
                # Regular images for each type
CARD_IMAGE_TYPES = [
getattr(item, u'{}_url'.format(image)),
getattr(item, u'{}_idol_url'.format(image)),
]
exclude_fields.append(image)
exclude_fields.append(image + '_idol')
# Old images
                if image != 'transparent':
CARD_IMAGE_TYPES += [
getattr(item, u'old_{}_url'.format(image)),
getattr(item, u'old_{}_idol_url'.format(image))
]
exclude_fields.append('old_' + image)
exclude_fields.append('old_' + image + '_idol')
                # If this image type exists on the item, append a combined images field
if getattr(item, image):
extra_fields.append((u'{}s'.format(image), {
'verbose_name': verbose_name,
'type': 'images_links',
'images': [{
'value': image_url.format(image),
'link': image_url.format(image),
'verbose_name': verbose_name,
'link_text': verbose_name,
} for image_url in CARD_IMAGE_TYPES if image_url],
'icon': 'pictures',
}))
# Exclude certain fields by default
for field in CARD_AUTO_EXCLUDE:
exclude_fields.append(field)
order = CARD_ORDER + order
fields = super(CardCollection.ItemView, self).to_fields(
item, *args, order=order, extra_fields=extra_fields, exclude_fields=exclude_fields, **kwargs)
if item.skill:
setSubField(fields, 'main_skill', key='value', value=string_concat(
item.skill.card_html(), '<br />', skill_sentence))
if item.center:
setSubField(fields, 'leader_skill', key='type', value='title_text')
setSubField(fields, 'leader_skill', key='title', value=string_concat(item.t_attribute, ' ', item.t_center))
                # If the info for the second half of the center skill is available
                if item.group != 0 and item.boost_percent is not None:
setSubField(fields, 'leader_skill', key='value', value=string_concat(leader_skill, ', ', leader_second))
return fields
class ListView(MagiCollection.ListView):
filter_form = forms.CardFilterForm
item_template = custom_item_template
per_line = 3
default_ordering = '-release,-card_id'
ajax_pagination_callback = 'loadCardList'
def get_queryset(self, queryset, parameters, request):
queryset = super(CardCollection.ListView, self).get_queryset(queryset, parameters, request)
if request.GET.get('ordering', None) in ['max_smile', 'max_pure', 'max_cool']:
queryset = queryset.extra(select={
'max_smile': 'CASE WHEN smile_max_idol NOT NULL THEN smile_max_idol WHEN smile_max NOT NULL THEN smile_max WHEN smile_min NOT NULL THEN smile_min END',
'max_pure': 'CASE WHEN pure_max_idol NOT NULL THEN pure_max_idol WHEN pure_max NOT NULL THEN pure_max WHEN pure_min NOT NULL THEN pure_min END',
'max_cool': 'CASE WHEN cool_max_idol NOT NULL THEN cool_max_idol WHEN cool_max NOT NULL THEN cool_max WHEN cool_min NOT NULL THEN cool_min END',
})
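                # 'x NOT NULL' is SQLite shorthand for 'x IS NOT NULL'; each CASE
                # prefers the idolized max stat, then the regular max, then the min.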
return queryset
def ordering_fields(self, item, only_fields=None, *args, **kwargs):
fields = super(CardCollection.ListView, self).ordering_fields(item, *args, only_fields=only_fields, **kwargs)
if 'max_smile' in only_fields:
fields['max_smile'] = {
'verbose_name': _('Smile'),
'value': item.smile_max_idol or item.smile_max or item.smile_min or '???',
'type': 'text',
'image': staticImageURL('0', folder='i_attribute', extension='png'),
}
if 'max_pure' in only_fields:
fields['max_pure'] = {
'verbose_name': _('Pure'),
'value': item.pure_max_idol or item.pure_max or item.pure_min or '???',
'type': 'text',
'image': staticImageURL('1', folder='i_attribute', extension='png'),
}
if 'max_cool' in only_fields:
fields['max_cool'] = {
'verbose_name': _('Cool'),
'value': item.cool_max_idol or item.cool_max or item.cool_min or '???',
'type': 'text',
'image': staticImageURL('2', folder='i_attribute', extension='png'),
}
return fields
class AddView(MagiCollection.AddView):
staff_required = True
permissions_required = ['manage_main_items']
class EditView(MagiCollection.EditView):
staff_required = True
permissions_required = ['manage_main_items']
allow_delete = True
############################################################
# Skill Collection
SKILL_TYPE_ICONS = {
    'score': 'scoreup', 'pl': 'perfectlock',
    'heal': 'healer', 'stat': 'statistics',
    'support': 'megaphone',
}
class SkillCollection(MagiCollection):
queryset = models.Skill.objects.all()
title = _('Skill')
plural_title = _('Skills')
multipart = True
form_class = forms.SkillForm
reportable = False
blockable = False
translated_fields = ('name', 'details',)
icon = 'sparkle'
navbar_link = False
permissions_required = ['manage_main_items']
filter_cuteform = {
'i_skill_type': {
'transform': CuteFormTransform.Flaticon,
'to_cuteform': lambda _k, _v: SKILL_TYPE_ICONS[models.Skill.get_reverse_i('skill_type', _k)],
},
}
def to_fields(self, view, item, *args, **kwargs):
fields = super(SkillCollection, self).to_fields(view, item, *args,
icons={'name':'id', 'skill_type':'category', 'details':'author'}, **kwargs)
return fields
class ListView(MagiCollection.ListView):
filter_form = forms.SkillFilterForm
item_template = custom_item_template
per_line = 6
class AddView(MagiCollection.AddView):
staff_required = True
permissions_required = ['manage_main_items']
class EditView(MagiCollection.EditView):
staff_required = True
permissions_required = ['manage_main_items']
allow_delete = True
############################################################
# Set Collection
class SetCollection(MagiCollection):
queryset = models.Set.objects.all()
title = _('Set')
plural_title = _('Sets')
multipart = True
form_class = forms.SetForm
reportable = False
blockable = False
translated_fields = ('title', )
icon = 'scout-box'
navbar_link_list = 'schoolidolfestival'
    _set_type_icons = {'gacha': 'scout-box', 'event': 'event'}
filter_cuteform = {
'i_unit': {
},
'i_set_type': {
'transform': CuteFormTransform.Flaticon,
'to_cuteform': lambda k, v: SetCollection._set_type_icons[models.Set.get_reverse_i('set_type', k)],
},
}
class ListView(MagiCollection.ListView):
filter_form = forms.SetFilterForm
item_template = custom_item_template
per_line = 4
class AddView(MagiCollection.AddView):
staff_required = True
permissions_required = ['manage_main_items']
class EditView(MagiCollection.EditView):
staff_required = True
permissions_required = ['manage_main_items']
allow_delete = True
| null |
sukutomo/magicollections.py
|
magicollections.py
|
py
| 43,280 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "magi.magicollections.PrizeCollection",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.DonateCollection",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.AccountCollection",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.AccountForm",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormType.YesNo",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormType",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Account",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Account.get_reverse_i",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "magi.utils.CuteFormTransform.FlaticonWithText",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.VERSIONS",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Account.get_reverse_i",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Account",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform.ImagePath",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.FlaticonWithText",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.ActivityCollection",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.ActivityCollection.ListView",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.ActivityCollection",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "magi.utils.CuteFormType.HTML",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormType",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormType.HTML",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormType",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "magi.utils.mergedFieldCuteForm",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Idol.unitImage",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Idol",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Idol.subUnitImage",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Idol",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Idol.objects.all",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Idol",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "sukutomo.forms.IdolForm",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.get_language",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "django.utils.formats.date_format",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ItemView",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Idol",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.get_language",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ListView",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.IdolFilterForm",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "magi.utils.custom_item_template",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "sukutomo.utils.sortIdolUnit",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.AddView",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.EditView",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.VERSIONS.values",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.VERSIONS",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.SIFEvent.objects.all",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.SIFEvent",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "sukutomo.forms.SIFEventForm",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.VERSIONS.items",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.VERSIONS",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.VERSIONS.items",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.VERSIONS",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.ImagePath",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ItemView",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.VERSIONS.items",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.VERSIONS",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "magi.utils.torfc2822",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ListView",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.SIFEventFilterForm",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "magi.utils.jsv",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "magi.utils.jsv",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.AddView",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.EditView",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Song.objects.all",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Song",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "sukutomo.forms.SongForm",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Account.VERSIONS.items",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Account",
"line_number": 420,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Account.VERSIONS.items",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Account",
"line_number": 421,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "magi.utils.CuteFormTransform.ImagePath",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormType.YesNo",
"line_number": 447,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormType",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.Flaticon",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ItemView",
"line_number": 481,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Song",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "magi.utils.torfc2822",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Song",
"line_number": 542,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 542,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "magi.utils.jsv",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ListView",
"line_number": 609,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 609,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.SongFilterForm",
"line_number": 610,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 610,
"usage_type": "name"
},
{
"api_name": "magi.utils.custom_item_template",
"line_number": 611,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.AddView",
"line_number": 620,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 620,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.EditView",
"line_number": 629,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 629,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 646,
"usage_type": "name"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "magi.utils.FAVORITE_CHARACTERS_IMAGES",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 665,
"usage_type": "call"
},
{
"api_name": "magi.utils.CuteFormType.HTML",
"line_number": 674,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormType",
"line_number": 674,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "magi.utils.CuteFormTransform.ImagePath",
"line_number": 683,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 683,
"usage_type": "name"
},
{
"api_name": "magi.utils.FAVORITE_CHARACTERS_IMAGES",
"line_number": 686,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 687,
"usage_type": "call"
},
{
"api_name": "magi.utils.CuteFormTransform.FlaticonWithText",
"line_number": 694,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 694,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.Flaticon",
"line_number": 701,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 701,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Skill.get_reverse_i",
"line_number": 702,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Skill",
"line_number": 702,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 702,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.FlaticonWithText",
"line_number": 705,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 705,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.Flaticon",
"line_number": 712,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 712,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Card.get_reverse_i",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 713,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.Flaticon",
"line_number": 717,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 717,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 731,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 740,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Card.objects.all",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 741,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 741,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 742,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "sukutomo.forms.CardForm",
"line_number": 745,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 745,
"usage_type": "name"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 781,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ItemView",
"line_number": 785,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 785,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 796,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 805,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 807,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 816,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 816,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 818,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 818,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 823,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 823,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 830,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 830,
"usage_type": "name"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 837,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 853,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Card",
"line_number": 855,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 855,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 857,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 866,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 925,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 925,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 929,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 930,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 930,
"usage_type": "call"
},
{
"api_name": "magi.utils.setSubField",
"line_number": 933,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.string_concat",
"line_number": 933,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.ListView",
"line_number": 937,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 937,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.CardFilterForm",
"line_number": 938,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 938,
"usage_type": "name"
},
{
"api_name": "magi.utils.custom_item_template",
"line_number": 939,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 958,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 961,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 965,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 968,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 972,
"usage_type": "call"
},
{
"api_name": "magi.utils.staticImageURL",
"line_number": 975,
"usage_type": "call"
},
{
"api_name": "magi.magicollections.MagiCollection.AddView",
"line_number": 980,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 980,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.EditView",
"line_number": 984,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 984,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 998,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Skill.objects.all",
"line_number": 999,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Skill",
"line_number": 999,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 999,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 1000,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 1001,
"usage_type": "call"
},
{
"api_name": "sukutomo.forms.SkillForm",
"line_number": 1003,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 1003,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.Flaticon",
"line_number": 1013,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 1013,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Skill.get_reverse_i",
"line_number": 1014,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Skill",
"line_number": 1014,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 1014,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.ListView",
"line_number": 1025,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1025,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.SkillFilterForm",
"line_number": 1026,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 1026,
"usage_type": "name"
},
{
"api_name": "magi.utils.custom_item_template",
"line_number": 1027,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.AddView",
"line_number": 1030,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1030,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.EditView",
"line_number": 1034,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1034,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1042,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Set.objects.all",
"line_number": 1043,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Set",
"line_number": 1043,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 1043,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 1044,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 1045,
"usage_type": "call"
},
{
"api_name": "sukutomo.forms.SetForm",
"line_number": 1047,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 1047,
"usage_type": "name"
},
{
"api_name": "magi.utils.CuteFormTransform.Flaticon",
"line_number": 1060,
"usage_type": "attribute"
},
{
"api_name": "magi.utils.CuteFormTransform",
"line_number": 1060,
"usage_type": "name"
},
{
"api_name": "sukutomo.models.Set.get_reverse_i",
"line_number": 1061,
"usage_type": "call"
},
{
"api_name": "sukutomo.models.Set",
"line_number": 1061,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.models",
"line_number": 1061,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.ListView",
"line_number": 1065,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1065,
"usage_type": "name"
},
{
"api_name": "sukutomo.forms.SetFilterForm",
"line_number": 1066,
"usage_type": "attribute"
},
{
"api_name": "sukutomo.forms",
"line_number": 1066,
"usage_type": "name"
},
{
"api_name": "magi.utils.custom_item_template",
"line_number": 1067,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.AddView",
"line_number": 1070,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1070,
"usage_type": "name"
},
{
"api_name": "magi.magicollections.MagiCollection.EditView",
"line_number": 1074,
"usage_type": "attribute"
},
{
"api_name": "magi.magicollections.MagiCollection",
"line_number": 1074,
"usage_type": "name"
}
] |
160087843
|
# Import necessary libraries
from pyimagesearch.centroidtracker import CentroidTracker
from imutils.video import VideoStream
import imutils
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import numpy as np
import argparse
import cv2
import tkinter
# Construct the argument parse and parse the arguments
ap = argparse.ArgumentParser(
description='Script to run MobileNet-SSD object detection network')
ap.add_argument('-v', '--video', type=str, default='',
help='Path to video file. If empty, web cam stream will be used')
ap.add_argument('-p', '--prototxt', required=True,
help="Path to Caffe 'deploy' prototxt file")
ap.add_argument('-m', '--model', required=True,
help='Path to weights for Caffe model')
ap.add_argument('-l', '--labels', required=True,
help='Path to labels for dataset')
ap.add_argument('-c', '--confidence', type=float, default=0.9,
help='Minimum probability to filter weak detections')
ap.add_argument('-i', '--ignore', required=True,
help="Path to ignore txt")
args = vars(ap.parse_args())
ct = CentroidTracker()
# Initialize class labels of the dataset
CLASSES = [line.strip() for line in open(args['labels'])]
IGNORE = [line.strip() for line in open(args['ignore'])]
objID = []        # tracked object IDs seen in the current timing window
objdistance = []  # initial distance estimate recorded per object ID
#print('[INFO]', CLASSES)
# Distance-estimation parameters
knownWidth = 0.7
knownDistance = 0.7
pixWidth = 550
focalLength = (pixWidth * knownWidth) / knownDistance
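# Triangle-similarity calibration: an object knownWidth metres wide viewed at
# knownDistance metres spans pixWidth pixels, so later frames can invert this:
# distance = (knownWidth * focalLength) / perceived_pixel_width.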
initDistance = 15
# Generate random bounding box colors for each class label
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))  # one BGR color per class
# Load Caffe model from disk
#print("[INFO] Loading model")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# Open video capture from file or capture device
def improc(carV, limitD):
Vcar = carV
start = time.process_time()
print("[INFO] Starting video stream")
    if args['video']:
        # NOTE: this capture handle is never consumed below; the frame loop
        # always reads from the PiCamera, so the --video path is effectively
        # unsupported as written.
        cap = cv2.VideoCapture(args['video'])
    else:
        camera = PiCamera()
        camera.resolution = (640, 480)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(640, 480))
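        # PiRGBArray exposes each captured frame as a raw NumPy array; it
        # must be cleared between frames (see rawCapture.truncate(0) at the
        # bottom of the loop) before the next capture can be written into it.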
for fram in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# Capture frame-by-frame
frame = fram.array
(h, w) = frame.shape[:2]
        # MobileNet requires fixed input dimensions, so each frame is
        # resized to 300x300 pixels. A scale factor is applied because the
        # network was trained on objects of different sizes, and a mean
        # subtraction of (127.5, 127.5, 127.5) normalizes the input; the
        # resulting "blob" has shape (1, 3, 300, 300).
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
# Pass the blob through the network and obtain the detections and predictions
net.setInput(blob)
detections = net.forward()
rects = []
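        # detections has shape (1, 1, N, 7); each candidate row holds
        # [image_id, class_id, confidence, x1, y1, x2, y2], with box
        # coordinates normalized to [0, 1] (hence the [w, h, w, h] scaling).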
for i in range(detections.shape[2]):
# Extract the confidence (i.e., probability) associated with the prediction
confidence = detections[0, 0, i, 2]
# Filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
# Extract the index of the class label from the `detections`,
# then compute the (x, y)-coordinates of the bounding box for
# the object
class_id = int(detections[0, 0, i, 1])
if CLASSES[class_id] in IGNORE:
continue
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
rects.append(box.astype("int"))
(startX, startY, endX, endY) = box.astype('int')
# Draw bounding box for the object
cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[class_id], 2)
# Draw label and confidence of prediction in frame
label = ""#"{}: {:.2f}%".format(CLASSES[class_id], confidence * 100)
#print("[INFO] {}".format(label))
cv2.rectangle(frame, (startX, startY), (endX, endY),
(0,255,0), 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
#Width= endX-startX
#distance = (knownWidth*focalLength)/Width
#label=label+"/ distance:"+str(distance)
objects = ct.update(rects)
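            # ct.update() matches the new boxes to existing tracks by
            # centroid distance, registering new objects and deregistering
            # vanished ones; it returns a dict of objectID -> centroid.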
#loop
for (objectID, centroid) in objects.items():
# draw both the ID of the object and the centroid of the
# object on the output frame
text = "ID {}".format(objectID)
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5,# COLORS[class_id]
(0), 2)
#seconds = time.time()
#t = time.localtime(seconds)
#print("t1: ", t.tm_sec)
#if t.tm_sec == 0:
                # The stock CentroidTracker centroid holds only (x, y), so
                # centroid[2]/centroid[3] would raise IndexError; use the
                # width of the most recent detection box instead.
                cWidth = endX - startX
                cdistance = float("{0:.2f}".format((knownWidth * focalLength) / cWidth))
#print(cdistance)
                elapsed = time.process_time() - start
if elapsed > 10:
elapsed = 1
start = time.process_time()
del objID[:]
del objdistance[:]
if objectID not in objID:
objID.append(objectID)
objdistance.append(cdistance)
#print(str(len(objID))+"/"+str(objdistance))
try:
#if len(objID) == len(objdistance) and len(objID)!=0:
#deltaDistance = cdistance - objdistance[objectID]
print(limitD)
if cdistance<limitD:
print("too close!!")
else:
print("good")
#print("ID: "+str(objectID) +" || sec: "+ str(int(elapsed)) +" || Vtot: " + str(Vtot)+ " || Vobj: " + str(Vobj)+ " || Dinit:" + str(float("{0:.2f}".format(objdistance[objectID]))) + " || D:" + str(cdistance));
label = "distance:" + str(cdistance)# +"pix:" + str(cWidth)
cv2.putText(frame, label, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0,0,255),# COLORS[class_id]
2)
except IndexError as e:
objdistance.append(cdistance)
print(str(objID)+"||"+str(objdistance))
        # Show frame
cv2.imshow("Frame", frame)
rawCapture.truncate(0)
key = cv2.waitKey(1) & 0xFF
# Press `q` to exit
if key == ord("q"):
break
# Clean-up
cv2.destroyAllWindows()
| null |
charlie.py
|
charlie.py
|
py
| 8,395 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyimagesearch.centroidtracker.CentroidTracker",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "time.process_time",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "picamera.PiCamera",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "picamera.array.PiRGBArray",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "time.process_time",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "time.process_time",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 204,
"usage_type": "call"
}
] |
232810622
|
from binascii import unhexlify
import requests
from twisted.internet.defer import fail
from dappcrowd.database import DAppCrowdDatabase
from dappcrowd.tc_database import DAppCrowdTrustChainDatabase
from pyipv8.ipv8.attestation.trustchain.community import TrustChainCommunity
from pyipv8.ipv8.attestation.trustchain.listener import BlockListener
from pyipv8.ipv8.deprecated.community import Community
from pyipv8.ipv8.peer import Peer
class DAppCrowdTrustchainCommunity(TrustChainCommunity):
master_peer = Peer(unhexlify('3081a7301006072a8648ce3d020106052b81040027038192000404267964a5be4a43ee1e59397c6765'
'8db0dadc276a89163a3b1f7dec3fdb4cecd94dd80968c9983bfffcd2cd58e8ec7ada6dded7ff3b389b'
'85f691ee0e7981326b4b4deb80ad536801d781795a335f501b80c00b479f076f0384fa7fa3bd940f76'
'82840dae77d46f938f49743acb5d2ab723046982608d60f2398853f9898d97e4e3b35fa19eb92f0ba2'
'c1570a31ae72'))
DB_CLASS = DAppCrowdTrustChainDatabase
def __init__(self, *args, **kwargs):
super(DAppCrowdTrustchainCommunity, self).__init__(*args, **kwargs)
self.persistence.my_peer = self.my_peer
def get_github_profile(self, username):
"""
Get the GitHub profile for a given username.
"""
return requests.get("https://api.github.com/users/%s" % username).json()
def import_github_profile(self, username):
"""
Import your GitHub profile.
"""
profile_info = self.get_github_profile(username)
#mid = self.my_peer.mid.encode('hex')
#if not profile_info['bio'] or mid not in profile_info['bio']:
# return fail(RuntimeError("your member ID (%s) should be in the GitHub bio!" % mid))
# Challenge successful, create TrustChain block
tx = {
'platform': 'github',
'info': {
'username': username,
'followers': profile_info['followers']
}
}
return self.create_source_block(block_type='devid_connection', transaction=tx)
def add_skill(self, name):
"""
Add a skill to your developer portfolio.
"""
tx = {
'name': name
}
return self.create_source_block(block_type='devid_skill', transaction=tx)
def endorse_skill(self, public_key, block_num):
"""
Endorse a specific skill of another user, identified by a public key and block number
"""
source_block = self.persistence.get(public_key, block_num)
return self.create_link(source_block)
class DAppCrowdCommunity(Community, BlockListener):
master_peer = Peer(unhexlify('3081a7301006072a8648ce3d020106052b81040027038192000406297d96eafe1f25408ecc44062310'
'67d4d644bf837e051d64fee582788544b360d30f21004eeb7f3425331423c7d5c9cc56ad7358558a43'
'6fd46ac53dc9f25575f4b28a512c8ca002aaab6d820800634f009a8d509e600a9c7f9a171e9d0c3a66'
'd2a823a5f6d6d2bfb5d96c1725163b03242a1e6b7d51ae110d5666d696640f4e3633bd9da346397dcd'
'0dd47bd6fe29'))
def __init__(self, *args, **kwargs):
working_directory = kwargs.pop('working_directory', '')
self.trustchain = kwargs.pop('trustchain')
self.ipfs_api = kwargs.pop('ipfs_api')
super(DAppCrowdCommunity, self).__init__(*args, **kwargs)
self.persistence = DAppCrowdDatabase(working_directory, 'dappcrowd', self.ipfs_api, self.trustchain.persistence)
self.persistence.my_peer = self.my_peer
self.trustchain.add_listener(self, ['dappcoder_project', 'dappcoder_submission', 'dappcoder_review'])
def should_sign(self, block):
if block.type == 'dappcrowd_review_request':
return False # We counter-sign this one manually
return True
def received_block(self, block):
if block.type == 'dappcoder_project' and not self.persistence.has_project(block.public_key, block.transaction['id']):
self.persistence.add_project(block)
if block.type == 'dappcoder_submission' and not self.persistence.has_submission(block.public_key, block.transaction['id']):
self.persistence.add_submission(block)
if block.type == 'dappcoder_review' and not self.persistence.has_review(block.public_key, block.transaction['id']):
self.persistence.add_review(block)
def create_project(self, name, specifications, deadline, reward, currency, min_reviews):
"""
Create a new project.
"""
# Add specifications to IPFS
pointer = self.ipfs_api.add_json({"specifications": specifications})
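        # Only the IPFS content hash is written into the TrustChain
        # transaction; the full specification stays in IPFS and is fetched
        # through this pointer.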
tx = {
'id': self.persistence.get_next_project_id(self.trustchain.my_peer.public_key.key_to_bin()),
'name': name,
'specifications': pointer,
'deadline': deadline,
'reward': reward,
'currency': currency,
'min_reviews': min_reviews,
'notary_signature': 'a' * 64,
}
return self.trustchain.create_source_block(block_type='dappcoder_project', transaction=tx)
def create_submission(self, project_public_key, project_id, submission):
"""
Create a submission for a given project.
"""
if not self.persistence.has_project(project_public_key, project_id):
raise RuntimeError("This project does not exist.")
# TODO check deadline expiration!
# Add submission to IPFS
pointer = self.ipfs_api.add_json({"submission": submission})
tx = {
'project_pk': project_public_key,
'project_id': project_id,
'id': self.persistence.get_next_submission_id(self.trustchain.my_peer.public_key.key_to_bin()),
'submission': pointer
}
return self.trustchain.create_source_block(block_type='dappcoder_submission', transaction=tx)
def create_review(self, submission_public_key, submission_id, review):
"""
Create a review for a given submission.
"""
# Add review to IPFS
pointer = self.ipfs_api.add_json({"review": review})
tx = {
'submission_pk': submission_public_key,
'submission_id': submission_id,
'id': self.persistence.get_next_review_id(self.trustchain.my_peer.public_key.key_to_bin()),
'review': pointer
}
return self.trustchain.create_source_block(block_type='dappcoder_review', transaction=tx)
def request_review(self, submission_id, requester_pub_key):
"""
Request a review for a submission from another peer.
"""
tx = {
'submission_id': submission_id
}
peer = self.network.get_verified_by_public_key_bin(requester_pub_key)
self.trustchain.sign_block(peer, requester_pub_key, block_type='dappcrowd_review_request', transaction=tx)
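        # This issues a proposal half-block; should_sign() above returns False
        # for 'dappcrowd_review_request', so the counterparty must counter-sign
        # explicitly via respond_to_review_request().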
def respond_to_review_request(self, block_hash, accept):
"""
Accept/reject a review request for a given block hash
"""
block = self.trustchain.persistence.get_block_with_hash(block_hash)
peer = self.network.get_verified_by_public_key_bin(block.public_key)
self.trustchain.sign_block(peer, linked=block, additional_info={'accept': accept})
def unload(self):
super(DAppCrowdCommunity, self).unload()
# Close the persistence layer
self.persistence.close()
| null |
dappcrowd/community.py
|
community.py
|
py
| 7,596 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyipv8.ipv8.attestation.trustchain.community.TrustChainCommunity",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pyipv8.ipv8.peer.Peer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "binascii.unhexlify",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dappcrowd.tc_database.DAppCrowdTrustChainDatabase",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyipv8.ipv8.deprecated.community.Community",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pyipv8.ipv8.attestation.trustchain.listener.BlockListener",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pyipv8.ipv8.peer.Peer",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "binascii.unhexlify",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "dappcrowd.database.DAppCrowdDatabase",
"line_number": 84,
"usage_type": "call"
}
] |