seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
21971690182
|
import json
from collections import OrderedDict
from operator import itemgetter
TEAMS = None
MATCHES = {}
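# Input data (inferred from the code below, not documented in the source):
# standings.json is assumed to map team name -> a numeric standing value, and
# splig.json is assumed to be an iCal-style JSON export with a
# calendars[0]["events"] list whose "summary" fields look like "TeamA-TeamB".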
def get_other(t, title):
t1, t2 = title.split("-")
if t in t1:
return get_team(t2)
return get_team(t1)
def get_team(t):
for team in TEAMS:
if team in t:
return team
def main():
global TEAMS, MATCHES
with open("standings.json", "r") as stand:
t_dict = json.loads(stand.read())
TEAMS = OrderedDict((k, t_dict[k]) for k in sorted(t_dict, key=t_dict.get))
with open("splig.json", "r") as sp:
cal = OrderedDict(json.loads(sp.read()))["calendars"][0]["events"]
for t in TEAMS:
MATCHES[t] = []
for i in cal:
if t in i["summary"]:
MATCHES[t].append(TEAMS[get_other(t, i["summary"])])
print(t)
print("11 hafta:" , MATCHES[t][:11], sum(MATCHES[t][:11]))
print("Kalan:", MATCHES[t][11:17], sum(MATCHES[t][11:17]))
print()
if __name__ == "__main__":
main()
|
mfkaptan/fixture-visualizer
|
lig.py
|
lig.py
|
py
| 1,026 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 28,
"usage_type": "call"
}
] |
72313218748
|
from flask import Flask, jsonify,request,json
from scrapper import scrap_cards
from config import *
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/scrap', methods = ['POST'])
def generate_json():
req = request.get_json(force=True)
print(req['url'])
scrap_cards(req['url'])
data = json.load(open(JSON_FOLDER+'output.json'))
return jsonify(data)
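# Example request (a sketch; assumes the app runs on Flask's default localhost:5000):
# curl -X POST http://localhost:5000/scrap -d '{"url": "https://example.com"}'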
if __name__ == '__main__':
app.run()
|
mage1711/flask-scrapers-api
|
app.py
|
app.py
|
py
| 495 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "scrapper.scrap_cards",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 18,
"usage_type": "call"
}
] |
13194360101
|
# -*-coding=utf-8-*-
# @Time : 2019/1/28 14:19
# @File : youtube_downloader.py
import subprocess
import sys
import pymongo
import re
import codecs
def extract_link(filename='web.html'):
with codecs.open(filename, 'r', encoding='utf8') as f:
content = f.read()
try:
result = re.findall('\{"videoId":"(\w+)"\}', content)
except Exception as e:
return []
else:
return result
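# e.g. a page containing {"videoId":"dQw4w9WgXcQ"} yields ["dQw4w9WgXcQ"]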
# download from a text file
def download_from_txt():
db = pymongo.MongoClient('10.18.6.46', port=27001)
doc = db['db_rocky']['youtube']
CMD = 'python you-get {}'
while 1:
with open('youtube_link.txt', 'r') as f:
lines = f.readlines()
lines_copy = lines.copy()
if not lines:
break
for line in lines:  # iterate the original list; lines_copy is the one mutated below
print(line.strip())
# line=line.strip()
is_exists = doc.find({'url': line.strip()})
if list(is_exists):
print('{} already exists!'.format(line))
lines_copy.remove(line)
else:
try:
p = subprocess.Popen(CMD.format(line.strip()), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
output, error = p.communicate()
except Exception as e:
print(e)
continue
else:
lines_copy.remove(line)
print('{} has been downloaded !'.format(line))
try:
doc.insert({'url': line.strip()})
except Exception as e:
print(e)
with open('youtube_link.txt', 'w') as f:
f.writelines(lines_copy)
def download_from_web():
db = pymongo.MongoClient('10.18.6.46', port=27001)
doc = db['db_rocky']['youtube']
id_list = extract_link()
base_url = 'https://www.youtube.com/watch?v={}'
for idx in id_list:
full_url = base_url.format(idx)
cmd='python you-get {}'.format(full_url)
is_exists = doc.find({'url': full_url})
# if list(is_exists):
# print('already downloaded >>>> {}'.format(full_url))
# continue
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
output, error = p.communicate()
except Exception as e:
print(e)
continue
else:
print('{} finished downloading!'.format(full_url))
try:
doc.insert({'url': full_url})
except Exception as e:
print(e)
function_map = {'1': download_from_txt,
'2': download_from_web}
option = sys.argv[1]
function_map.get(option)()
print('Done')
|
leegb/online_video_download
|
youtube_downloader.py
|
youtube_downloader.py
|
py
| 2,952 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "codecs.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 101,
"usage_type": "attribute"
}
] |
20143445172
|
"""Views for Learning Journal."""
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPNotFound
from anna_journal.models import Journals
from pyramid.security import remember, forget
from anna_journal.security import check_credentials
from datetime import datetime  # needed by create_view's creation_date below
@view_config(route_name='login', renderer='../templates/login.jinja2')
def login(request):
"""The login in view for our admin."""
if request.method == 'POST':
username = request.params.get('username', '')
password = request.params.get('password', '')
if check_credentials(username, password):
headers = remember(request, username)
return HTTPFound(location=request.route_url('list_view'), headers=headers)
return {}
@view_config(route_name='logout')
def logout(request):
headers = forget(request)
return HTTPFound(request.route_url('list_view'), headers=headers)
@view_config(route_name='list_view', renderer='../templates/index.jinja2')
def list_view(request):
"""Display list of journal entries."""
JOURNALS = request.dbsession.query(Journals).all()
return {
'journals': JOURNALS
}
@view_config(route_name='detail_view', renderer='../templates/detail.jinja2')
def detail_view(request):
"""View single journal entry."""
entry_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journals).get(entry_id)
return {
'entry': entry
}
@view_config(route_name='create_view', renderer='../templates/form.jinja2', permission='secret', require_csrf=True)
def create_view(request):
"""Create a new view."""
if request.method == "POST" and request.POST:
if request.POST['title'] and request.POST['body']:
form_data = request.POST
new_entry = Journals(
title=form_data['title'],
body=form_data['body'],
creation_date=datetime.now(),
)
request.dbsession.add(new_entry)
return HTTPFound(location=request.route_url('list_view'))
return request.POST
@view_config(
route_name='update_view', renderer='../templates/form_edit.jinja2', permission='secret', require_csrf=True)
def update_view(request):
"""Update an existing view."""
entry_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journals).get(entry_id)
if not entry:
return HTTPNotFound
if request.method == "GET":
return {
'title': entry.title,
'body': entry.body
}
if request.method == "POST":
form_data = request.POST
entry.title = form_data['title']
entry.body = form_data['body']
request.dbsession.flush()
return HTTPFound(location=request.route_url('detail_view', id=entry_id))
|
Bonanashelby/pyramid-learning-journal
|
anna_journal/anna_journal/views/default.py
|
default.py
|
py
| 2,855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "anna_journal.security.check_credentials",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyramid.security.remember",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyramid.httpexceptions.HTTPFound",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyramid.security.forget",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyramid.httpexceptions.HTTPFound",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "anna_journal.models.Journals",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "anna_journal.models.Journals",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "anna_journal.models.Journals",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pyramid.httpexceptions.HTTPFound",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "anna_journal.models.Journals",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "pyramid.httpexceptions.HTTPNotFound",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pyramid.httpexceptions.HTTPFound",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 65,
"usage_type": "call"
}
] |
19672334600
|
import pytest
from bfprt.algo import insertion_sort, partition, select, swap
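# bfprt refers to the Blum-Floyd-Pratt-Rivest-Tarjan median-of-medians algorithm;
# per the tests below, select(items, lo, hi, k) is expected to return the k-th
# smallest element of items[lo:hi + 1].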
class TestInternal:
def test_swap(self):
items = [4, 1, 2, 5, 9, 8]
swap(items, 2, 3)
assert items == [4, 1, 5, 2, 9, 8]
@pytest.mark.parametrize("items, pivot_index, expected_items, expected_index", [
([4, 2, 1, 9, 5, 8], 0, [2, 1, 4, 9, 5, 8], 2),
([4, 2, 1, 9, 5, 8], 4, [4, 2, 1, 5, 8, 9], 3),
([2, 1], 0, [1, 2], 1),
([2, 1], 1, [1, 2], 0),
([3, 2, 1], 1, [1, 2, 3], 1),
])
def test_partition(self, items, pivot_index, expected_items, expected_index):
pivot_index = partition(items, 0, len(items) - 1, pivot_index)
assert pivot_index == expected_index
assert items == expected_items
def test_select(self):
for i in range(6):
items = [4, 2, 1, 9, 5, 8]
selected = select(items, 0, 5, i)
expected = [1, 2, 4, 5, 8, 9]
assert selected == expected[i]
def test_insertion_sort(self):
items = [4, 2, 9, 5, 8]
insertion_sort(items, 0, 4)
assert items == [2, 4, 5, 8, 9]
|
gregorybchris/bfprt
|
tests/test_internal.py
|
test_internal.py
|
py
| 1,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bfprt.algo.swap",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bfprt.algo.partition",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "bfprt.algo.select",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bfprt.algo.insertion_sort",
"line_number": 34,
"usage_type": "call"
}
] |
71804916988
|
from __future__ import print_function
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from config import params, data
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(params["MAX_SENT_LEN"], 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, params["MAX_SENT_LEN"])
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def encode(self, x):
h1 = self.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
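# As written, the sampling step below is commented out and the mean is
# returned directly, so (together with KLD = 0 in loss_function) the model
# behaves as a plain autoencoder rather than a VAE.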
# print(logvar)
# print(mu)
# print(self.training)
# if self.training:
# print(logvar)
# std = logvar.mul(0.5).exp_()
# eps = Variable(std.data.new(std.size()).normal_())
# # print(eps)
# # print(eps)
# print(std)
# a = eps.mul(std)
# # print(a)
# # a = eps.mul(std).add_(mu)
# # print(a)
# return a
# else:
# # print(mu)
# print(mu)
return mu
def decode(self, z):
h3 = self.relu(self.fc3(z))
# print(h3)
return self.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 59))
z = self.reparameterize(mu, logvar)
# print(z)
return self.decode(z), mu, logvar
def loss_function(recon_x, x, mu, logvar):
# print(recon_x)
# print(x)
BCE = F.binary_cross_entropy(recon_x, x.view(-1, 59))
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
# Normalise by same number of elements as in reconstruction
# KLD /= params["BATCH_SIZE"] * 59
KLD = 0
return BCE + KLD
def train(model):
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(params["EPOCH"]):
for i in range(0, len(data["train_x"]), params["BATCH_SIZE"]):
batch_range = min(params["BATCH_SIZE"], len(data["train_x"]) - i)
batch_x = [[data["word_to_idx"][w] for w in sent] +
[params["VOCAB_SIZE"] + 1] *
(params["MAX_SENT_LEN"] - len(sent))
for sent in data["train_x"][i:i + batch_range]]
model.train()
train_loss = 0
feature = Variable(torch.FloatTensor(batch_x))
if params["CUDA"]:
feature = feature.cuda()
optimizer.zero_grad()
recon_batch, mu, logvar = model(feature)
# print(recon_batch)
loss = loss_function(recon_batch, feature, mu, logvar)
loss.backward()
train_loss += loss.data[0]
optimizer.step()
print('Train Epoch: {} \tLoss: {:.6f}'.format(
epoch,loss.data[0] / len(feature)))
def test(model):
model.eval()
test_loss = 0
for i in range(0, len(data["test_x"]), params["BATCH_SIZE"]):
batch_range = min(params["BATCH_SIZE"], len(data["test_x"]) - i)
batch_x = [[data["word_to_idx"][w] for w in sent] +
[params["VOCAB_SIZE"] + 1] *
(params["MAX_SENT_LEN"] - len(sent))
for sent in data["test_x"][i:i + batch_range]]
data = Variable(torch.FloatTensor(batch_x), volatile=True)
if params["CUDA"]:
data = data.cuda()
recon_batch, mu, logvar = model(data)
test_loss += loss_function(recon_batch, data, mu, logvar).data[0]
test_loss /= len(data["text_x"])
print('====> Test set loss: {:.4f}'.format(test_loss))
|
hoxmark/Deep_reinforcement_active_learning
|
selection_strategies/models/vae.py
|
vae.py
|
py
| 3,987 |
python
|
en
|
code
| 17 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.binary_cross_entropy",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "config.params",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "config.params",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "config.params",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "config.data",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "config.data.cuda",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "config.data",
"line_number": 122,
"usage_type": "argument"
},
{
"api_name": "config.data",
"line_number": 123,
"usage_type": "argument"
},
{
"api_name": "config.data",
"line_number": 125,
"usage_type": "name"
}
] |
39255470036
|
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from proco.utils.tasks import update_cached_value
class SoftCacheManager(object):
CACHE_PREFIX = 'SOFT_CACHE'
def get(self, key):
value = cache.get('{0}_{1}'.format(self.CACHE_PREFIX, key), None)
if value:
if (
(value['expired_at'] and value['expired_at'] < timezone.now().timestamp())
or value.get('invalidated', True)
) and value.get('request_path', None):
update_cached_value.delay(url=value['request_path'])
return value['value']
def _invalidate(self, key):
value = cache.get(key, None)
if value:
value['invalidated'] = True
cache.set(key, value, None)
def invalidate_many(self, keys):
for key in keys:
self.invalidate(key)
def invalidate(self, key='*'):
if isinstance(key, str):
keys = cache.keys('{0}_{1}'.format(self.CACHE_PREFIX, key))
for key in keys:
self._invalidate(key)
elif isinstance(key, (list, tuple)):
self.invalidate_many(key)
def set(self, key, value, request_path=None, soft_timeout=settings.CACHES['default']['TIMEOUT']):
cache.set('{0}_{1}'.format(self.CACHE_PREFIX, key), {
'value': value,
'invalidated': False,
'request_path': request_path,
'expired_at': (timezone.now().timestamp() + soft_timeout) if soft_timeout else None,
}, None)
cache_manager = SoftCacheManager()
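# Example usage (a sketch; key names, path and timeout are illustrative):
# cache_manager.set('schools', payload, request_path='/api/schools/', soft_timeout=3600)
# cache_manager.get('schools')       # returns the value; a stale hit schedules a background refresh
# cache_manager.invalidate('schools')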
|
unicef/Project-Connect-BE
|
proco/utils/cache.py
|
cache.py
|
py
| 1,625 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "django.core.cache.cache.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "proco.utils.tasks.update_cached_value.delay",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "proco.utils.tasks.update_cached_value",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.keys",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.CACHES",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 45,
"usage_type": "name"
}
] |
74637089147
|
# TODO: (only sent audio, still need sync) receive audio packets and sync with video
# DONE: try to connect to host AFTER clicking on 'start' button
# TODO: fix crash when video is ended or trying to reconnect
import base64
import os
import socket
import sys
import numpy as np
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, QTimer, QObject, pyqtSignal, QThread
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QFileDialog, QLabel, QGraphicsScene, QGraphicsView
import cv2
from datetime import timedelta
import queue
import time
import logging, random, imutils
import os
import pyaudio, wave, subprocess
import errno
import pickle
import threading
logging.basicConfig(format="%(message)s", level=logging.INFO)
class PlayVideo(QThread):
def __init__(self, frame, fpsLabel, threadChat, playButton, stopButton, chat_socket,
progressBar, progresslabel):
super().__init__()
self.frame = frame
self.fpsLabel = fpsLabel
self.playButton = playButton
self.stopButton = stopButton
self.progressBar = progressBar
self.progresslabel = progresslabel
self.timer = QTimer()
self.timer.timeout.connect(self.playVideo)
self.timer.start(0.5)
self.threadChat = threadChat
self.playButton.clicked.connect(self.playTimer)
self.stopButton.clicked.connect(self.stopTimer)
self.fps, self.st, self.frames_to_count, self.cnt = (0, 0, 20, 0)
self.BUFF_SIZE = 65536
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.BUFF_SIZE)
self.socket_address = ('192.168.0.106', 9685) # client ip
print('Reading from:', self.socket_address)
self.client_socket.bind(self.socket_address)
self.client_socket.setblocking(False)
self.progressBar.sliderPressed.connect(self.when_slider_pressed)
self.progressBar.sliderReleased.connect(self.moveProgressBar)
self.chat_socket = chat_socket
self.slider_pressed = False
self.set_total_frames = False
def frame_to_timestamp(self, frame, fps):
return str(timedelta(seconds=(frame / fps)))
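# e.g. frame_to_timestamp(150, 25) -> "0:00:06"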
def send_message(self, message):
message = '{}: {}'.format(self.threadChat.nickname, message)
self.chat_socket.send(message.encode('ascii'))
def playTimer(self):
# start timer
self.send_message('/play')
def stopTimer(self):
# stop timer
self.send_message('/pause')
def when_slider_pressed(self):
self.slider_pressed = True
def moveProgressBar(self):
value = self.progressBar.value()
self.send_message('/skipto ' + str(value))
self.slider_pressed = False
def playVideo(self):
try:
packet_ser, _ = self.client_socket.recvfrom(self.BUFF_SIZE)
packet = pickle.loads(packet_ser)
# TODO: receive total_frames and real_fps from the chat TCP socket only once
# can't since server can open different video file and client metadata doesn't update
# consider sending total_frames and real_fps to client over TCP chat everytime we change the file
current_frame_no = packet["frame_nb"]
total_frames = packet["total_frames"]
real_fps = packet["fps"]
if not self.set_total_frames:
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(total_frames)
self.set_total_frames = True
if self.slider_pressed is False:
self.progressBar.setValue(current_frame_no)
progress = self.frame_to_timestamp(current_frame_no, real_fps) + ' / ' \
+ self.frame_to_timestamp(total_frames, real_fps)
self.progresslabel.setText(progress)
data = base64.b64decode(packet["frame"], ' /')
npdata = np.fromstring(data, dtype=np.uint8)
frame = cv2.imdecode(npdata, 1)
# convert image to RGB format
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# get image infos
height, width, channel = frame.shape
# print(height, width, channel)
step = channel * width
# create QImage from image
qImg = QImage(frame.data, width, height, step, QImage.Format_RGB888)
self.frame.setPixmap(QPixmap.fromImage(qImg))
self.fpsLabel.setText(str(round(self.fps, 1)))
if self.cnt == self.frames_to_count:
try:
self.fps = round(self.frames_to_count / (time.time() - self.st))
self.st = time.time()
self.cnt = 0
except:
pass
self.cnt += 1
# because of socket being non-blocking
# we must pass the error when not receiving frames (video is paused)
except BlockingIOError:
pass
except Exception as e:
logging.error(e)
# print('received')
def quit(self):
print('closed thread')
class TcpChat(QThread):
def __init__(self, chat_socket):
super().__init__()
self.nickname = 'test_user' # input("Choose your nickname: ")
self.client = chat_socket
self.client.connect(('192.168.0.106', 7976)) # connecting client to server
# self.client.setblocking(False)
def receive(self):
while True: # making valid connection
try:
message = self.client.recv(1024).decode('ascii')
if message == 'NICKNAME':
self.client.send(self.nickname.encode('ascii'))
else:
print(message) # received in bytes
except Exception as e: # case on wrong ip/port details
print("An error occured on the server side!")
logging.error(e)
self.client.close()
break
def write(self):
while True: # message layout
message = '{}: {}'.format(self.nickname, input(''))
self.client.send(message.encode('ascii'))
def run(self):
receive_thread = threading.Thread(target=self.receive) # receiving multiple messages
receive_thread.start()
write_thread = threading.Thread(target=self.write) # sending messages
write_thread.start()
class AudioRec(QThread):
def __init__(self):
super().__init__()
self.host_name = socket.gethostname()
self.host_ip = '192.168.0.106' # client ip
print(self.host_ip)
self.port = 9631
# For details visit: www.pyshine.com
self.q = queue.Queue(maxsize=100)
self.BUFF_SIZE = 65536
self.audio_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.audio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.BUFF_SIZE)
self.socket_address = (self.host_ip, self.port)
self.audio_socket.bind(self.socket_address)
self.p = pyaudio.PyAudio()
self.CHUNK = 1024
self.stream = self.p.open(format=self.p.get_format_from_width(2),
channels=2,
rate=44100,
output=True,
frames_per_buffer=self.CHUNK)
self.timer = QTimer()
self.timer.timeout.connect(self.playAudio)
self.timer.start(1000 * 0.8 * self.CHUNK / 44100)
t1 = threading.Thread(target=self.getAudioData, args=())
t1.start()
print('Now Playing...')
def getAudioData(self):
while True:
try:
self.frame, _ = self.audio_socket.recvfrom(self.BUFF_SIZE)
self.q.put(self.frame)
except BlockingIOError:
pass
except Exception as e:
logging.error(e)
def playAudio(self):
if not self.q.empty():
frame = self.q.get()
self.stream.write(frame)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
loadUi('open_client.ui', self)
self.frame.setScaledContents(True)
self.setWindowTitle('OpenParty Client')
self.totalFrames = 0
self.fps = 0
self.threadVideoGen = QThread()
self.threadVideoPlay = QThread()
self.threadAudio = QThread()
self.threadChat = QThread()
self.readHost.clicked.connect(self.startAllThreads)
self.HEADER_LENGTH = 10
self.chat_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.chat_started = False
def startAllThreads(self):
if not self.chat_started:
self.startTcpChat()
self.chat_started = True
if not self.threadAudio.isRunning():
self.startAudio()
if not self.threadVideoPlay.isRunning():
self.startVideoPlay()
def closeEvent(self, event):
print('closed manually')
self.chat_socket.close()
self.threadVideoPlay.terminate()
self.threadAudio.terminate()
self.threadChat.terminate()
os._exit(1)
def startVideoPlay(self):
self.threadVideoPlay = PlayVideo(self.frame, self.fpsLabel, self.threadChat,
self.playButton, self.stopButton,
self.chat_socket,
self.progressBar, self.progresslabel)
self.threadVideoPlay.start()
def startAudio(self):
self.threadAudio = AudioRec()
self.threadAudio.start()
def startTcpChat(self):
self.threadChat = TcpChat(self.chat_socket)
self.threadChat.start()
app = QApplication(sys.argv)
widget = MainWindow()
widget.show()
sys.exit(app.exec_())
|
shully899509/OpenParty
|
pyqt player client.py
|
pyqt player client.py
|
py
| 10,093 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QTimer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_RCVBUF",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pickle.loads",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "cv2.imdecode",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtGui.QImage",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QImage.Format_RGB888",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtGui.QPixmap.fromImage",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QPixmap",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "socket.gethostname",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_RCVBUF",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QTimer",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "os._exit",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 291,
"usage_type": "call"
}
] |
19528069970
|
# -*- coding: utf-8 -*-
# common base utilities module
__author__='zhaicao'
import pymssql
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtCore import Qt
import winreg
import os
import sys
import webbrowser
# SQL Server access class
class MSSQL:
def __init__(self,**kwargs):
self.dbInfo = kwargs
def __GetConnect(self):
self.conn = pymssql.connect(**self.dbInfo,charset = "utf8")
cur = self.conn.cursor()
if not cur:
raise NameError("failed to connect to the database")
else:
return cur
# return the query result
def ExecQuery(self,sql):
cur = self.__GetConnect()
cur.execute(sql)
resList = cur.fetchall()
self.conn.close()
return resList
# execute sql
def ExecNonQuery(self,sql):
cur = self.__GetConnect()
cur.execute(sql)
self.conn.commit()
self.conn.close()
# object repository: wraps basic operations on widget objects
class ObjRepository(object):
def __init__(self, widgetObj, *objDict):
self.__widgetObj = widgetObj
self.__objDict = dict()
# merge the dicts (union)
for i in objDict:
self.__objDict = dict(self.__objDict,**i)
def getObjByName(self, objName):
return self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
def getObjTextByName(self, objName):
obj = self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
if isinstance(obj, QtWidgets.QComboBox):
return obj.currentText()
elif isinstance(obj, QtWidgets.QLineEdit):
return obj.text()
elif isinstance(obj, QtWidgets.QCheckBox):
return obj.checkState() == Qt.Checked
else:
return None
# get a combo box value as bool; only supports two-choice combo boxes
def getObjBoolByName(self, objName):
obj = self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
return bool(obj.currentIndex())
def getWidgetObj(self):
return self.__widgetObj
def setObjEnabled(self, objName, state):
self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName).setEnabled(state)
def getTextByObj(self, obj):
if isinstance(obj, QtWidgets.QComboBox):
return obj.currentText()
elif isinstance(obj, QtWidgets.QLineEdit):
return obj.text()
elif isinstance(obj, QtWidgets.QCheckBox):
return obj.checkState() == Qt.Checked
else:
return None
def setObjTextByName(self, objName, text):
obj = self.__widgetObj.findChild(self.__objDict[objName]['objType'], objName)
if isinstance(obj, QtWidgets.QLineEdit):
obj.setText(text)
# basic utility class
class Util(object):
# show a confirmation message via a custom message box
@classmethod
def mesInfomation(self, widgetObj, message, title='Notice', *args):
mesbox = QtWidgets.QMessageBox(widgetObj)
mesbox.setWindowTitle(title)
mesbox.setText(message)
# add any buttons that were passed in
if len(args) == 0:
mesbox.addButton('OK', QtWidgets.QMessageBox.ActionRole)
else:
for i in args:
mesbox.addButton(i, QtWidgets.QMessageBox.ActionRole)
mesbox.exec_()
return mesbox
# write a config dict to a file
@classmethod
def writeFile(cls, filepath, fileData, connector=':'):
f = open(filepath, 'w')
try:
for i, k in fileData.items():
f.write('%s%s %s' % (str(i), str(connector), str(k)) + '\n')
except Exception as e:
print(e)
return False
finally:
f.close()
return True
# convert a config dict to a string
@classmethod
def dictTransforStr(cls, confList, connector=':'):
reStr = ''
for k,v in confList.items():
reStr += '%s%s %s' % (str(k), str(connector), str(v)) + '\n'
return reStr
# get the Windows desktop path
@classmethod
def getWinDesktop(cls):
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders', )
return winreg.QueryValueEx(key, "Desktop")[0]
# copy text to the clipboard
@classmethod
def copyClipboardText(self, Text):
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(str(Text))
# copy an image to the clipboard
@classmethod
def copyClipboardImage(self, image):
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setPixmap(QtGui.QPixmap(os.path.join(os.path.dirname(__file__), image)))
# copy HTML to the clipboard
@classmethod
def copyHtml(self, html):
mimeData = QtCore.QMimeData()
mimeData.setHtml(html)
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setMimeData(mimeData)
# open the given url
# if path is None, use the default browser
# if the Chrome browser cannot be found, fall back to the default browser
@classmethod
def openUrl(self, url, path=None):
if path:
chromePath = r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
if os.path.exists(chromePath):
webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(chromePath))
webbrowser.get('chrome').open(url, new=1, autoraise=True)
else:
webbrowser.open(url, new=1, autoraise=True)
else:
webbrowser.open(url, new=1, autoraise=True)
# convert a relative path to an absolute path
# args paths: path components of the target, variadic
# returns the absolute path
@classmethod
def getAbsPath(self, *paths):
if getattr(sys, 'frozen', False):
dir = os.path.dirname(sys.executable)
elif __file__:
dir = os.path.dirname(__file__)
return os.path.join(dir, *paths)
# write to the log
@classmethod
def log(self, context):
import codecs
with codecs.open(self.getAbsPath('log.txt'), 'a', 'gbk') as file:
file.write(context)
file.write('\n')
if __name__ == '__main__':
a = "python"
Util.copyClipboardText(a)
print(QtWidgets.QApplication.clipboard().text())  # requires a running QApplication
|
zhaicao/pythonWorkspace
|
DeployTool/eventAction/Utils.py
|
Utils.py
|
py
| 6,744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymssql.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QComboBox",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QCheckBox",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Checked",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QComboBox",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QCheckBox",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Checked",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "winreg.OpenKey",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_CURRENT_USER",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "winreg.QueryValueEx",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.clipboard",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.clipboard",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QPixmap",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QMimeData",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.clipboard",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "webbrowser.register",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "webbrowser.BackgroundBrowser",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "webbrowser.get",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "webbrowser.register",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "webbrowser.BackgroundBrowser",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "webbrowser.get",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "sys.executable",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "{'codecs': 'codecs'}.getClipboardText",
"line_number": 210,
"usage_type": "call"
}
] |
19054013888
|
"""
Scripts to align sequences and transform them into 1-hot encoding
"""
# Author: Alessio Milanese <[email protected]>
import shutil
import time
import subprocess
import shlex
import os
import errno
import sys
import tempfile
import numpy as np
import re
from stag.helpers import is_tool, read_fasta
#===============================================================================
# FUNCTIONS
#===============================================================================
# ------------------------------------------------------------------------------
# function to convert the nucleotide alignment into 1-hot encoding.
# Note that we select only the nucleotides that corresponds to the inner state
# of the HMM.
encoding_dic = {
"A": [0, 0, 0, 0, 1],
"C": [0, 0, 0, 1, 0],
"G": [0, 0, 1, 0, 0],
"T": [0, 1, 0, 0, 0],
"U": [0, 1, 0, 0, 0],
"others": [1, 0, 0, 0, 0]
}
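# e.g. "ACT" encodes to [0,0,0,0,1] + [0,0,0,1,0] + [0,1,0,0,0]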
def convert_alignment(alignment, verbose, as_numpy=False):
n_aligned_characters, n_char = 0, 0
converted_ali = list()
for character in alignment:
# 1-hot encoding
# the ACGTU are converted, everything else that is upper case, is considered
# as a gap ('-').
# for example also 'N' is converted to "-" -> "1,0,0,0,0"
# Note that the upper case letters and "-" represents alignment to the
# hidden state of the HMM.
if not character.islower():
n_char += 1
encoded_block = encoding_dic.get(character) #, encoding_dic["others"])
if encoded_block: #not encoded_block[0]:
# others' high bit = 1
n_aligned_characters += 1
else:
encoded_block = encoding_dic["others"]
converted_ali.extend(encoded_block)
#if as_numpy:
# converted_ali = np.array(list(map(bool, converted_ali)), dtype=bool)
return np.array(converted_ali, dtype=bool), n_aligned_characters / n_char * 100
# function that transform a protein MSA to a nucleotide MSA --------------------
# if check_length is True, then we check that
# len(protein) == len(gene)*3 OR len(protein)-3 == len(gene)*3
def protein2gene_alignment(gene_id, protein_alignment, gene_sequence, check_length=True):
# check that the length is correct
only_AA_from_ali = re.sub(r'\-', '', protein_alignment)
if check_length:
expected_gene_length = len(only_AA_from_ali) * 3
# check if lengths of gene and protein sequence match, with or without stop codon
if len(gene_sequence) != expected_gene_length and len(gene_sequence) - 3 != expected_gene_length:
sys.stderr.write("Error, length of genes/alignment is not correct")
sys.stderr.write(" (protein: "+str(len(only_AA_from_ali)*3)+", gene: "+str(len(gene_sequence))+")\n")
sys.stderr.write(" ID: "+gene_id+"\n")
return None
# convert alignment
pos_gene, al_gene = 0, list()
for res in protein_alignment:
found = False
if res == "-":
al_gene.append("---")
found = True
elif res.isupper():
al_gene.extend(gene_sequence[pos_gene:pos_gene + 3])
pos_gene += 3
found = True
elif res.islower():
found = True
# since we have to remove the lower case letter, we do not
# add those to the alignment, but we anyway increase pos_gene
pos_gene += 3
if not found:
sys.stderr.write("Error, character not identified\n")
return "".join(al_gene)
# ------------------------------------------------------------------------------
# main function as a generator
def align_generator(seq_file, protein_file, hmm_file, use_cmalign, n_threads, verbose, return_numpy, min_perc_state):
"""Align sequences and transform them into 1-hot encoding, ready for
classification.
Parameters
----------
seq_file: file with the nucleotide sequences [string]
protein_file: file with the protein sequences [string or None]
hmm_file: file with the hmm model [string]
use_cmalign: if True, we use cmalign. If false, we use hmmalign [bool]
n_threads: number of threads to use for cmalign (hmmalign can run only
on one thread) [string/int]
verbose: how much info to print [int]
return_numpy: True if you want to return a numpy array instead of a string
Returns
-------
Returns a generator with:
(fasta_id, aligned_sequence) tuples
"""
# number of sequences that pass and don't pass the filter
n_pass, n_not_pass = 0, 0
# check that the tools are available
if use_cmalign and not is_tool("cmalign"):
raise ValueError("[E::align] Error: cmalign is not in the path. Please install Infernal.")
elif not is_tool("hmmalign"):
raise ValueError("[E::align] Error: hmmalign is not in the path. Please install HMMER3.")
if not is_tool("esl-reformat"):
raise ValueError("[E::align] Error: esl-reformat is not in the path. Please install Easel.")
aligner = f"cmalign --cpu {n_threads}" if use_cmalign else "hmmalign"
seq_input = protein_file if protein_file else seq_file
align_cmd = f"{aligner} {hmm_file} {seq_input}"
if verbose > 4:
print(f"Command used to align the sequences: {align_cmd}", file=sys.stderr)
# run the command
CMD = shlex.split(align_cmd)
align_cmd = subprocess.Popen(CMD, stdout=subprocess.PIPE,)
# command to parse the alignment from STOCKHOLM to fasta format
cmd2 = "esl-reformat a2m -"
CMD2 = shlex.split(cmd2)
parse_cmd = subprocess.Popen(CMD2, stdin=align_cmd.stdout, stdout=subprocess.PIPE,)
if protein_file:
seq_stream = zip(read_fasta(parse_cmd.stdout, head_start=1),
read_fasta(open(seq_file), is_binary=False, head_start=1))
else:
seq_stream = read_fasta(parse_cmd.stdout, head_start=1)
for item in seq_stream:
if protein_file:
(pid, pseq), (gid, gseq) = item
if pid != gid:
sys.stderr.write("[E::align] Error. protein and gene identifiers {} {} don't match.".format(pid, gid))
sys.exit(1)
gseq = protein2gene_alignment(gid, pseq, gseq, check_length=True)
else:
gid, gseq = item
converted_ali, perc_aligned_characters = convert_alignment(gseq, verbose, as_numpy=return_numpy)
if perc_aligned_characters >= min_perc_state:
n_pass += 1
yield gid, converted_ali
else:
n_not_pass += 1
# check that hmmalign/cmalign finished correctly
align_cmd.stdout.close()
return_code = align_cmd.wait()
if return_code:
raise ValueError("[E::align] Error. hmmalign/cmalign failed.")
# check that converting the file worked correctly
parse_cmd.stdout.close()
return_code = parse_cmd.wait()
if return_code:
raise ValueError("[E::align] Error. esl-reformat failed.")
# print the number of sequences that were filtered
if verbose > 3:
print(f" Number of sequences that pass the filter: {n_pass}", file=sys.stderr)
print(f" Number of sequences that do not pass the filter: {n_not_pass}", file=sys.stderr)
# ------------------------------------------------------------------------------
# main function
def align_file(seq_file, protein_file, hmm_file, use_cmalign, n_threads, verbose, res_file, min_perc_state):
"""Align sequences and transform them into 1-hot encoding, ready for
classification.
Parameters
----------
seq_file : file with the nucleotide sequences [string]
protein_file: file with the protein sequences [string or None]
hmm_file : file with the hmm model [string]
use_cmalign : if True, we use cmalign. If false, we use hmmalign [bool]
n_threads: number of threads to use for cmalign (hmmalign can run only
on one thread) [string/int]
verbose: how much info to print [int]
res_file: where to save the result.
Returns
-------
It will save the aligned sequences to the specified file.
"""
temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w")
os.chmod(temp_file.name, 0o644)
with temp_file:
for gid, ali in align_generator(seq_file, protein_file, hmm_file, use_cmalign,
n_threads, verbose, False, min_perc_state):
print(gid, *map(int, ali), sep="\t", file=temp_file)
try:
temp_file.flush()
os.fsync(temp_file.fileno())
except:
raise ValueError("[E::align] Error when saving the resulting file.")
try:
shutil.move(temp_file.name, res_file)
except:
raise ValueError(f"[E::align] The resulting file couldn't be saved. You can find the file here:\n{temp_file.name}.")
|
zellerlab/stag
|
stag/align.py
|
align.py
|
py
| 8,980 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "stag.helpers.is_tool",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "stag.helpers.is_tool",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "stag.helpers.is_tool",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "shlex.split",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "shlex.split",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "stag.helpers.read_fasta",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "stag.helpers.read_fasta",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "stag.helpers.read_fasta",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "os.chmod",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "os.fsync",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 216,
"usage_type": "call"
}
] |
29259984179
|
import os
import sys
from datetime import datetime
from glob import glob
from re import split
from numpy import asarray, savetxt
class iGrav:
# find all the .tsf inside the input directory (even in the sub directory)
def get_all_tfs(self, input_folder):
paths_list = glob(input_folder + "/**/*.tsf", recursive=True)
if len(paths_list) <= 0:
print("[ERROR]: There's no .tsf file")
sys.exit()
else:
return paths_list
# from a .tsf file get the header with channels and units measured by the device
def get_header(self, path):
with open(path, "r") as file:
header = []
content = file.readlines()
for i, line in enumerate(content):
if "[CHANNELS]" in line: # get all the channels that the device measure
start_idx = i + 1
end_idx = start_idx
while len(content[end_idx]) > 1:
channel = split(r":", content[end_idx].strip())[-1]
end_idx += 1
header.append(channel)
if "[UNITS]" in line: # get measure units and add to the header
counter = 0
header_len = len(header)
start_idx = i + 1
end_idx = start_idx
while len(content[end_idx]) > 1 and counter < header_len:
unit = content[end_idx].strip()
header[counter] = f"{header[counter]} ({unit})"
counter += 1
end_idx += 1
timestamp = header[-1]
header.pop(-1)
header.insert(0, timestamp)
return header
# from a .tsf file get only the content without the header
def get_content(self, path):
with open(path, "r") as file:
content = file.readlines()
for i, line in enumerate(content):
if "[DATA]" in line:
start_idx = i + 1
while len(content[start_idx]) <= 1:
start_idx += 1
return content[start_idx:]
# process the file and write the content in CSV format in the output file
def process(self, file_path, output_path):
output_path = self.get_output_path(file_path, output_path)
header = self.get_header(file_path)
self.append_row_in_file(header, output_path) # add header in the output csv file
content = self.get_content(file_path)
last_dt = None
for line in content:
data = self.data_row_validator(line)
if data != None:
date = self.format_datetime(data[0])
columns = data[1:]
if last_dt is None or (last_dt is not None and (last_dt - date).total_seconds() <= -1):
last_dt = date
self.append_row_in_file([date, *columns], output_path)
    # validate each content line; drop NaN rows and rows that lack a valid datetime field
    def data_row_validator(self, row):
        if "\x00" not in row:
            data = split(r"\s{2,}", row.strip())
            if data[0] not in ("", None):
                return data
        return None
# append a array in a file using CSV format with numpy
def append_row_in_file(self, data, output_file):
output = asarray([[str(item) for item in data]])
with open(output_file, "a") as file:
savetxt(file, output, fmt="%s", delimiter=",", newline="\n")
# reformat the datetime in YYYY-MM-DD HH:mm:ss
def format_datetime(self, string):
date = string.split(" ")
return datetime.strptime(f"{'-'.join(date[:3])} {':'.join(date[3:])}", "%Y-%m-%d %H:%M:%S")
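    # e.g. "2021 03 15 12 30 00" -> datetime(2021, 3, 15, 12, 30)  (hypothetical sample row)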
# from the input path and the output path generate the new CSV file path (get only the filename from the input path)
def get_output_path(self, input_path, output_path):
output = output_path
if not output.endswith(os.path.sep):
output += os.path.sep
file_name = os.path.basename(input_path).split(".")[0]
return f"{output}/{file_name}.csv"
def main():
igrav = iGrav()
input_path = sys.argv[1]
output_path = sys.argv[2]
if os.path.exists(input_path) and os.path.exists(output_path):
file_list = igrav.get_all_tfs(input_path)
for path in file_list:
igrav.process(path, output_path)
else:
print("[ERROR]: Input or Output path doesn't exist!")
if __name__ == "__main__":
main()
|
lucamir/iGravToCSV
|
main.py
|
main.py
|
py
| 4,775 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "glob.glob",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "os.path",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
}
] |
24692323834
|
#Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
    Function for loading disaster response messages
Arguments:
messages_filepath: File path to file containing disaster
response messages
categories_filepath: File path to file containing disaster
response classification
Returns:
df: A dataframe containing the merged datasets
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(left=messages,right=categories,how='left',on='id')
return df
def clean_data(df):
'''
Function for cleaning the disaster response message dataset
Arguments:
df: Pandas dataframe
Returns:
df: Pandas dataframe
'''
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(';',expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0,:]
# use this row to extract a list of new column names for categories.
category_colnames = row.apply(lambda row: row[:-2]).values
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].apply(lambda row: row[-1])
# convert column from string to numeric
categories[column] = categories[column].astype(int)
df.drop('categories',axis=1,inplace=True)
# concatenate the original dataframe with the new `categories` dataframe
df = pd.concat([df,categories],axis=1)
# Remove erroneous values
df = df[df['related']!=2]
# drop duplicates
df_dedup = df.drop_duplicates(subset='id')
df = df_dedup.drop_duplicates(subset='message',keep=False)
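    # keep=False drops every row whose message text is duplicated, so no copy of an
    # identically-worded (and therefore ambiguous) message survives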
return df
def save_data(df, database_filename):
'''
    Function for saving a dataset to a SQLite database
Arguments:
df: Pandas dataframe. Dataset that needs to be saved
database_filename: Location where database should be saved
'''
engine = create_engine('sqlite:///{}'.format(database_filename))
df.to_sql('Disaster_messages', engine, index=False,if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
|
Rmostert/Disaster-response-pipeline
|
data/process_data.py
|
process_data.py
|
py
| 3,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 88,
"usage_type": "attribute"
}
] |
6919943057
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GdkPixbuf
class View(Gtk.Window):
# TODO: Make GUI prettier - low priority
# TODO: Change metric to *C and imperial to *F
def __init__(self):
super().__init__(title='Weather Forecast')
self._box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(self._box)
self._enter_city = Gtk.Entry()
self._enter_city.set_text('Enter city')
self._box.add(self._enter_city)
        self._search_button = Gtk.Button(label='Search')
self._box.add(self._search_button)
self._units_format_combo = Gtk.ComboBoxText()
self._units_format_combo.append('metric', 'metric')
self._units_format_combo.append('imperial', 'imperial')
self._box.add(self._units_format_combo)
self._weather_image = Gtk.Image()
self._box.add(self._weather_image)
self._city_label = Gtk.Label()
self._box.add(self._city_label)
self._temperature_label = Gtk.Label()
self._box.add(self._temperature_label)
self._conditions_label = Gtk.Label()
self._box.add(self._conditions_label)
self._description_label = Gtk.Label()
self._box.add(self._description_label)
self._up_to_date_label = Gtk.Label()
self._box.add(self._up_to_date_label)
self.connect('destroy', Gtk.main_quit)
@staticmethod
def run():
Gtk.main()
def set_weather_icon(self, icon):
        weather_icon_path = f'./icons/{self._get_weather_image_icon(icon)}.svg'
        pixbuf = GdkPixbuf.Pixbuf.new_from_file(weather_icon_path)
self._weather_image.set_from_pixbuf(pixbuf=pixbuf)
def set_city(self, city):
self._city_label.set_label(city)
def set_temperature(self, temperature, units_format):
units_format_display = 'C' if units_format == 'metric' else 'F'
self._temperature_label.set_label(f'{temperature}\u00B0{units_format_display}')
def set_conditions(self, conditions):
self._conditions_label.set_label(conditions)
def set_description(self, description):
self._description_label.set_label(description)
def on_search(self, callback):
self._search_button.connect('clicked', lambda widget: callback(self._enter_city.get_text()
if self._enter_city.get_text() != 'Enter city' else ''))
def set_units_format(self, unit_format):
self._units_format_combo.set_active_id(unit_format)
def on_units_format_changed(self, callback):
self._units_format_combo.connect('changed', lambda widget: callback(self._units_format_combo.get_active_id()))
def set_up_to_date_message(self, is_weather_up_to_date=False):
color = 'green' if is_weather_up_to_date else 'red'
up_to_date_message = 'Less then 2 hours ago' if is_weather_up_to_date else 'More than 2 hours ago'
self._up_to_date_label.set_markup(f'<span color="{color}">Last update:\n{up_to_date_message}</span>')
def show_dialog(self, status):
if status == 'Unauthorized':
dialog_title = 'Authorization problem'
dialog_text = 'Wrong API key'
elif status == 'ConnectionError':
dialog_title = 'Connection problem'
dialog_text = 'Check internet connection'
elif status == 'NotFound':
dialog_title = 'City not found'
dialog_text = 'Try another city'
else:
dialog_title = 'Unknown problem'
dialog_text = 'Problem not known'
dialog = Gtk.MessageDialog(
transient_for=self,
flags=0,
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.OK,
text=dialog_title
)
dialog.format_secondary_text(dialog_text)
dialog.run()
dialog.destroy()
@staticmethod
    def _get_weather_image_icon(icon_from_api):
icons_mapping = {
'01d': 'weather-clear',
'01n': 'weather-clear-night',
'02d': 'weather-few-clouds',
'02n': 'weather-clouds-night',
'03d': 'weather-clouds',
'03n': 'weather-few-clouds-night',
'04d': 'weather-overcast',
'04n': 'weather-overcast',
'09d': 'weather-showers-scattered',
'09n': 'weather-showers-scattered',
'10d': 'weather-showers',
'10n': 'weather-showers',
'11d': 'weather-storm',
'11n': 'weather-storm',
'13d': 'weather-snow',
'13n': 'weather-snow',
'50d': 'weather-fog',
'50n': 'weather-fog',
'N/A': 'weather-none'
}
return icons_mapping[icon_from_api]
|
lukasz130/WeatherForecast
|
sources/view.py
|
view.py
|
py
| 4,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gi.require_version",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Window",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Box",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Orientation",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk.Entry",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ComboBoxText",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Image",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.Label",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.main_quit",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.main",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "gi.repository.GdkPixbuf.Pixbuf",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "gi.repository.GdkPixbuf",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.MessageDialog",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.MessageType",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "gi.repository.Gtk.ButtonsType",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 103,
"usage_type": "name"
}
] |
7926234515
|
import pandas as pd
import yfinance as yf
# Read the symbols from a CSV file
symbols_df = pd.read_csv("symbols.csv")
symbols = symbols_df["Symbol"].tolist()
# Specify the years
years = [2021, 2022]
# Create an empty list to store the dataframes for each stock
dfs = []
# Iterate over the symbols
for symbol in symbols:
try:
# Get the dividends data for the specified years
stock = yf.Ticker(symbol)
dividends = stock.dividends
for year in years:
dividends_year = dividends.loc[str(year)]
if dividends_year.empty:
print(f"No dividend data available for {symbol} in {year}.")
continue
# Get the market capitalization data
try:
market_cap = float(stock.info["marketCap"])
except KeyError:
market_cap = None
# Get the stock price data for the specified year
stock_price = stock.history(start=f"{year}-01-01", end=f"{year}-12-31")
# Calculate the price at the beginning and end of the year
price_at_beginning = stock_price.iloc[0]['Close']
price_at_end = stock_price.iloc[-1]['Close']
# Create a dataframe for the current stock and year
data = {
"Symbol": symbol,
"Year": year,
"Dividend Date": dividends_year.index.strftime('%m/%d/%Y'),
"Market Capitalization": market_cap,
"Count of total dividends paid for that year": len(dividends_year),
"How much was paid": dividends_year.tolist(),
"Price at Beginning of Year": price_at_beginning,
"Price at End of Year": price_at_end
}
df = pd.DataFrame(data)
# Append the dataframe to the list
dfs.append(df)
except Exception as e:
print(f"Got error from Yahoo API for ticker {symbol}, Error: {str(e)}")
print(f"Skipping symbol {symbol} due to data unavailability.")
# Concatenate the dataframes for all stocks and years
result_df = pd.concat(dfs)
# Convert columns to float type
result_df["Market Capitalization"] = result_df["Market Capitalization"].astype(float)
result_df["Count of total dividends paid for that year"] = result_df["Count of total dividends paid for that year"].astype(float)
# Save the dataframe to a CSV file
result_df.to_csv("temp_dividend_data_2022_2023.csv", index=False)
|
kmlspktaa/data-analytics
|
economics/dividends-trading/development/dividend-stocks.py
|
dividend-stocks.py
|
py
| 2,490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "yfinance.Ticker",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 62,
"usage_type": "call"
}
] |
25875757160
|
from numba import jit
import numpy as np
from obspy.taup import TauPyModel
import os
@jit(nopython=True, fastmath=True)
def coords_lonlat_rad_bearing(lat1, lon1, dist_deg, brng):
"""
Returns the latitude and longitude of a new cordinate that is the defined distance away and
at the correct bearing from the starting point.
Parameters
----------
lat1 : float
Starting point latitiude.
lon1 : float
Starting point longitude.
dist_deg : float
Distance from starting point in degrees.
brng : float
Angle from north describing the direction where the new coordinate is located.
Returns
-------
lat2 : float
Longitude of the new cordinate.
lon2 : float
Longitude of the new cordinate.
"""
brng = np.radians(brng) # convert bearing to radians
d = np.radians(dist_deg) # convert degrees to radians
lat1 = np.radians(lat1) # Current lat point converted to radians
lon1 = np.radians(lon1) # Current long point converted to radians
lat2 = np.arcsin(
(np.sin(lat1) * np.cos(d)) + (np.cos(lat1) * np.sin(d) * np.cos(brng))
)
lon2 = lon1 + np.arctan2(
np.sin(brng) * np.sin(d) * np.cos(lat1), np.cos(d) - np.sin(lat1) * np.sin(lat2)
)
lat2 = np.degrees(lat2)
lon2 = np.degrees(lon2)
# lon2 = np.where(lon2 > 180, lon2 - 360, lon2)
# lon2 = np.where(lon2 < -180, lon2 + 360, lon2)
if lon2 > 180:
lon2 -= 360
elif lon2 < -180:
lon2 += 360
else:
pass
return lat2, lon2
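# Sanity check: coords_lonlat_rad_bearing(0, 0, 90, 0) -> (90.0, 0.0),
# i.e. travelling 90 degrees due north from the equator reaches the pole.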
@jit(nopython=True, fastmath=True)
def haversine_deg(lat1, lon1, lat2, lon2):
"""
Function to calculate the distance in degrees between two points on a sphere.
Parameters
----------
lat1 : float
Latitiude of point 1.
lat1 : float
Longitiude of point 1.
lat2 : float
Latitiude of point 2.
lon2 : float
Longitude of point 2.
Returns
-------
d : float
Distance between the two points in degrees.
"""
dlat = np.radians(lat2 - lat1)
dlon = np.radians(lon2 - lon1)
a = (np.sin(dlat / 2)) ** 2 + np.cos(np.radians(lat1)) * np.cos(
np.radians(lat2)
) * (np.sin(dlon / 2)) ** 2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
d = np.degrees(c)
return d
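# e.g. haversine_deg(0, 0, 0, 90) == 90.0, a quarter of the way around the equator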
def deg_km_az_baz(lat1, lon1, lat2, lon2):
"""
Function to return the ditances in degrees and km over a spherical Earth
with the backazimuth and azimuth. Distances calculated using the haversine
formula.
Parameters
----------
lat(1/2) : float
Latitude of point (1/2)
lon(1/2) : float
Longitude of point (1/2)
Returns
-------
dist_deg : float
Distance between points in degrees.
dist_km :
Distance between points in km.
az : float
Azimuth at location 1 pointing to point 2.
baz : float
Backzimuth at location 2 pointing to point 1.
"""
# use haversine formula to get distance in degrees and km
R = 6371
dist_deg = haversine_deg(lat1, lon1, lat2, lon2)
dist_km = np.radians(dist_deg) * R
az = np.degrees(
np.arctan2(
(np.sin(np.radians(lon2 - lon1)) * np.cos(np.radians(lat2))),
np.cos(np.radians(lat1)) * np.sin(np.radians(lat2))
- np.sin(np.radians(lat1))
* np.cos(np.radians(lat2))
* np.cos(np.radians(lon2 - lon1)),
)
)
# baz=np.degrees(np.arctan2((np.sin(np.radians(lon1-lon2))*np.cos(np.radians(lat1))), np.cos(np.radians(lat2))*np.sin(np.radians(lat1)) - np.sin(np.radians(lat2))*np.cos(np.radians(lat1))*np.cos(np.radians(lon1-lon2)) ))
dLon = np.radians(lon1 - lon2)
y = np.sin(dLon) * np.cos(np.radians(lat1))
x = np.cos(np.radians(lat2)) * np.sin(np.radians(lat1)) - np.sin(
np.radians(lat2)
) * np.cos(np.radians(lat1)) * np.cos(dLon)
baz = np.arctan2(y, x)
baz = np.degrees(baz)
if baz < 0:
baz = (baz + 360) % 360
return dist_deg, dist_km, az, baz
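# e.g. for two equatorial points 90 degrees apart, deg_km_az_baz(0, 0, 0, 90)
# returns (90.0, ~10007.5 km, azimuth 90.0, backazimuth 270.0)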
def relocate_event_baz_slow(evla, evlo, evdp, stla, stlo, baz, slow, phase, mod='prem'):
"""
Given event location, mean station location and slowness vector
(baz and slow), relocate the event so the ray arrives with the
slowness and backazimuth.
Paramters
---------
evla : float
Event latitude.
evlo : float
Event longitude.
evdp : float
Event depth.
stla : float
Station latitude.
stlo : float
Station longitude.
baz : float
Backazimuth of slowness vector.
slow : float
Horizontal slowness of slowness vector.
phase : string
Target phase (e.g. SKS).
mod : string
1D velocity model to use (default is PREM).
Returns
-------
new_evla : float
Relocated event latitude.
new_evlo : float
Relocated event longitude.
"""
model = TauPyModel(model=mod)
dist_deg = haversine_deg(lat1=evla, lon1=evlo, lat2=stla, lon2=stlo)
# define distances to search over
    dist_min = dist_deg - 30
    dist_max = dist_deg + 30
dist_search = np.linspace(dist_min, dist_max, 1000)
diff_slows = np.ones(dist_search.shape)
# if the difference just keeps increasing
# stop after 20 increases
early_stop_count = 0
for i,test_distance in enumerate(dist_search):
try:
## work out slowness and compare to the observed slowness
tap_out_test = model.get_travel_times(source_depth_in_km=float(evdp),
distance_in_degree=float(test_distance),
receiver_depth_in_km=0.0,
phase_list=[phase])
abs_slow_test = tap_out_test[0].ray_param_sec_degree
diff_slow = abs(abs_slow_test - slow)
## work out slowness and compare to the observed slowness
diff_slows[i] = diff_slow
if diff_slow > diff_slows[i-1]:
early_stop_count +=1
else:
early_stop_count = 0
if early_stop_count > 20:
                print('residual has been increasing for more than 20 iterations, breaking loop')
break
        except Exception:
            # no arrival of this phase at this test distance; skip it
            pass
    min_diff = np.amin(diff_slows)
    loc = np.where(diff_slows == min_diff)[0][0]
    distance_at_slowness = dist_search[loc]
new_evla, new_evlo = coords_lonlat_rad_bearing(lat1 = stla,
lon1 = stlo,
dist_deg = distance_at_slowness,
brng = baz)
return new_evla, new_evlo
def predict_pierce_points(evla, evlo, evdp, stla, stlo, phase, target_depth, mod='prem'):
"""
Given station and event locations, return the pierce points at a particular
depth for source or receiver side locations.
Parameters
----------
evla : float
Event latitude.
evlo : float
Event longitude.
evdp : float
Event depth.
stla : float
Station latitude.
stlo : float
Station longitude.
phase : string
Target phase
target_depth : float
Depth to calculate pierce points.
mod : string
1D velocity model to use (default is PREM).
Returns
-------
r_pierce_la : float
Receiver pierce point latitude.
r_pierce_lo : float
Receiver pierce point longitude.
s_pierce_la : float
Source pierce point latitude.
s_pierce_lo : float
Source pierce point longitude.
"""
    # I don't like the obspy taup pierce interface, so I use
    # the Java TauP command-line tool through Python.
# This will assume you have taup installed:
# https://github.com/crotwell/TauP/
# print(f"taup_pierce -mod {mod} -h {evdp} -sta {stla} {stlo} -evt {evla} {evlo} -ph {phase} --pierce {target_depth} --nodiscon > ./temp.txt")
os.system(f"taup_pierce -mod {mod} -h {evdp} -sta {stla} {stlo} -evt {evla} {evlo} -ph {phase} --pierce {target_depth} --nodiscon > ./temp.txt")
    # check number of lines
    with open("./temp.txt", 'r') as temp_file:
        lines = temp_file.readlines()
    number_of_lines = len(lines)
if number_of_lines == 2:
print(f"Only pierces depth {target_depth} once.")
print(f"Writing this one line to the file.")
source_line = lines[-1]
receiver_line = lines[-1]
elif number_of_lines == 3:
source_line = lines[1]
receiver_line = lines[-1]
elif number_of_lines > 3:
print(f"Phase {phase} pierces depth {target_depth} more than twice.")
print(f"Writing pierce point closest to source/receiver")
source_line = lines[1]
receiver_line = lines[-1]
    if number_of_lines >= 2:
s_dist, s_pierce_depth, s_time, s_pierce_la, s_pierce_lo = source_line.split()
r_dist, r_pierce_depth, r_time, r_pierce_la, r_pierce_lo = receiver_line.split()
else:
print('Neither the phase nor ScS can predict this arrival, not continuing')
s_pierce_la = 'nan'
s_pierce_lo = 'nan'
r_pierce_la = 'nan'
r_pierce_lo = 'nan'
# os.remove("./temp.txt")
return s_pierce_la, s_pierce_lo, r_pierce_la, r_pierce_lo
|
eejwa/Array_Seis_Circle
|
circ_array/geo_sphere_calcs.py
|
geo_sphere_calcs.py
|
py
| 9,707 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "numpy.radians",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "obspy.taup.TauPyModel",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 287,
"usage_type": "call"
}
] |
20824483233
|
import inflect
def main():
p = inflect.engine()
names = []
while True:
try:
name = input("Name: ")
names.append(name)
        except (EOFError, KeyboardInterrupt):
names = p.join(names)
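            # inflect's join produces natural phrasing, e.g. ["Alice", "Bob"] -> "Alice and Bob"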
print("Adieu, adieu, to " + names)
quit()
main()
|
lauriwesterlund/CS50P
|
Solutions/adieu.py
|
adieu.py
|
py
| 341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "inflect.engine",
"line_number": 5,
"usage_type": "call"
}
] |
38259209826
|
import cv2
#Configurable Parameters
inputValue = int(input("Enter the scale value to resize the image: (0 - 100): "))
if inputValue >= 0 and inputValue <= 100:
source = "wx.jpg"
destination = 'newImage.png'
scale_percent = inputValue
src = cv2.imread(source, cv2.IMREAD_UNCHANGED)
#cv2.imshow("title", src)
    # scale both dimensions by the requested percentage of the original size
new_width = int(src.shape[1] * scale_percent / 100)
new_height = int(src.shape[0] * scale_percent / 100)
dsize = (new_width, new_height)
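    # cv2.resize uses bilinear interpolation (cv2.INTER_LINEAR) by default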
output = cv2.resize(src, dsize)
cv2.imwrite(destination, output)
#cv2.waitKey(0)
else:
print("Enter correct values")
|
sundaram-sharma/image-resizer-python
|
main.py
|
main.py
|
py
| 715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 24,
"usage_type": "call"
}
] |
16908443054
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys
import os
from os import path
import numpy as np
from PIL import Image
import datetime
import matplotlib.pyplot as plt
import time
sys.path.append(os.getcwd() + "/lib/wordcloud")
from wordcloud import WordCloud
text = "初鳩,初花,初針,初春,初日,初日影,初飛行,初披講,初日の出,初雲雀,初富士,初冬,初風呂,初箒,初蛍,初参り,初松風,初神籤,初彌撒,春小袖," \
"春炬燵,春寒,春雨,春芝居,春近し,春隣,春の馬,春の海,春の蚊,春の風,春の川,春の草,春の雲,春の暮,春の蟬,春の空,春の田,春の月,春の虹," \
"春の野,春の日,春の服,春,2,3,春の星,はるのほし,春,2,3,春の水,はるのみず,春,2,3,春の山,はるのやま,春,2,3,春の闇,はるのやみ,春," \
"2,3,春の雪,はるのゆき,春,2,3,春の夜,はるのよ,春,2,春の宵,はるのよい,2,3,春日傘,はるひがさ,春,2,3,春深し,はるふかし,春,2,3,春待つ," \
"はるまつ,冬,2,春祭,はるまつり,春,2,春めく,はるめく,春,2,春休み,はるやすみ,春,2,春夕焼,はるゆうやけ,春,2,バレンタインデー," \
"ばれんたいんでー,春,晩夏,ばんか,夏,晩菊,ばんぎく,秋,万愚節,ばんぐせつ,春,半夏,はんげ,夏,半夏生,はんげしょう,夏,パンジー,ぱんじー," \
"春,晩秋,ばんしゅう,秋,晩春,ばんしゅん,春,半仙戯,はんせんぎ,春,晩霜,ばんそう,春,斑猫,はんみょう,夏,晩涼,ばんりょう,夏,万緑," \
"ばんりょく,夏,日脚伸ぶ,ひあしのぶ,ビーチパラソル,びーちぱらそる,夏,ひひな,ひいな,春,柊,ひいらぎ,冬,麦酒,びーる,夏".encode('utf-8')
d = path.dirname(__file__)
start_day = datetime.date.today()
logo_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/logo_mask.png")))
spring_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/spring-mask.png")))
newyear_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/newyear-mask.png")))
summer_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/summer-mask.png")))
autumn_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/autumn-mask.png")))
winter_mask = np.array(Image.open(
path.join(d, os.getcwd() + "/mask/winter-mask.png")))
total_f = open("./text/total_wordcloud.txt")
total_text = total_f.read()
weekly_f = open("./text/weekly_wordcloud.txt")
weekly_text = weekly_f.read()
monthly_f = open("./text/monthly_wordcloud.txt")
monthly_text = monthly_f.read()
newyear_f = open("./text/newyear_wordcloud.txt")
newyear_text = newyear_f.read()
spring_f = open("./text/spring_wordcloud.txt")
spring_text = spring_f.read()
summer_f = open("./text/summer_wordcloud.txt")
summer_text = summer_f.read()
autumn_f = open("./text/autumn_wordcloud.txt")
autumn_text = autumn_f.read()
winter_f = open("./text/winter_wordcloud.txt")
winter_text = winter_f.read()
while True:
today = datetime.date.today()
wordcloud = WordCloud(background_color="lightcyan", mode="RGB", font_path="./ヒラギノ角ゴシック W5.ttc",
mask=logo_mask, width=600, height=600, colormap="gist_rainbow").generate(total_text)
wordcloud.to_file("./image/total_wordcloud.png")
print("total_wordcloud was generated.")
wordcloud = WordCloud(background_color="lightcyan", mode="RGB", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=logo_mask, width=600, height=600, colormap="gist_rainbow").generate(weekly_text)
wordcloud.to_file("./image/weekly_wordcloud.png")
print("weekly_wordcloud was generated.")
wordcloud = WordCloud(background_color="lightcyan", mode="RGB", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=logo_mask, width=600, height=600, colormap="gist_rainbow").generate(monthly_text)
wordcloud.to_file("./image/monthly_wordcloud.png")
print("monthly_wordcloud was generated.")
wordcloud = WordCloud(background_color="moccasin", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=newyear_mask, width=600, height=600, colormap="Reds").generate(newyear_text)
wordcloud.to_file("./image/newyear_wordcloud.png")
print("newyear_wordcloud was generated.")
wordcloud = WordCloud(background_color="palegreen", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=spring_mask, width=600, height=600, colormap="spring").generate(spring_text)
wordcloud.to_file("./image/spring_wordcloud.png")
print("spring_wordcloud was generated.")
wordcloud = WordCloud(background_color="paleturquoise", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=summer_mask, width=600, height=600, colormap="summer").generate(summer_text)
wordcloud.to_file("./image/summer_wordcloud.png")
print("summer_wordcloud was generated.")
wordcloud = WordCloud(background_color="darkslategray", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=autumn_mask, width=600, height=600, colormap="autumn").generate(autumn_text)
wordcloud.to_file("./image/autumn_wordcloud.png")
print("autumn_wordcloud was generated.")
wordcloud = WordCloud(background_color="midnightblue", font_path="./ヒラギノ角ゴシック W5.ttc",
                          mask=winter_mask, width=600, height=600, colormap="PuBuGn").generate(winter_text)
wordcloud.to_file("./image/winter_wordcloud.png")
print("winter_wordcloud was generated.")
time.sleep(86400)
|
PL2GroupJ/PyWordCloud
|
wc.py
|
wc.py
|
py
| 5,758 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "wordcloud.to_file",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 111,
"usage_type": "call"
}
] |
22378324072
|
from rest_framework.response import Response
from .serializers import PersonSerializer
from .models import Person
from rest_framework import status
from rest_framework.permissions import IsAdminUser
from rest_framework.decorators import api_view, permission_classes
@api_view(['GET','POST'])
def home(request):
if request.method == 'POST':
name = request.data['name']
return Response({'name':f'my name is {name}'})
else:
return Response({'name':'sina'})
@api_view()
def persons(request):
person = Person.objects.all()
ser_data = PersonSerializer(person,many=True)
return Response(ser_data.data,status=status.HTTP_200_OK)
@api_view()
@permission_classes([IsAdminUser])
def person(request,name):
try:
person = Person.objects.get(name=name)
except Person.DoesNotExist:
return Response({'error':'this user does not exist'},status=status.HTTP_404_NOT_FOUND)
ser_data = PersonSerializer(person)
return Response(ser_data.data,status=status.HTTP_200_OK)
#create_user
@api_view(['POST'])
def person_create(request):
info=PersonSerializer(data=request.data)
if info.is_valid():
info.save()
return Response({'message':'ok'},status=status.HTTP_201_CREATED)
else:
return Response(info.errors,status=status.HTTP_400_BAD_REQUEST)
|
sinajamshidi247/django_rest_framework
|
A/home/views.py
|
views.py
|
py
| 1,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.response.Response",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Person.objects.all",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.Person.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.Person",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "serializers.PersonSerializer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Person.objects.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Person.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "models.Person",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "models.Person.DoesNotExist",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "models.Person",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_404_NOT_FOUND",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "serializers.PersonSerializer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.permission_classes",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rest_framework.permissions.IsAdminUser",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "serializers.PersonSerializer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 40,
"usage_type": "call"
}
] |
10422627143
|
from __future__ import annotations
import asyncio
import dataclasses
import logging
import uuid
from typing import TYPE_CHECKING, Self
from PySide6.QtCore import QObject, Signal
from randovania.bitpacking.json_dataclass import JsonDataclass
from randovania.interface_common.players_configuration import INVALID_UUID
from randovania.lib import json_lib, migration_lib
if TYPE_CHECKING:
from collections.abc import Iterable
from pathlib import Path
_MIGRATIONS = [
# lambda data: data,
]
CURRENT_VERSION = migration_lib.get_version(_MIGRATIONS)
def migrate_to_current(data: dict):
return migration_lib.apply_migrations(data, _MIGRATIONS, copy_before_migrating=True)
@dataclasses.dataclass(frozen=True)
class WorldServerData(JsonDataclass):
world_name: str
session_id: int
session_name: str
def _combine_tuples(existing: tuple[int, ...], new_indices: Iterable[int]) -> tuple[int, ...]:
new = set(existing)
for it in new_indices:
new.add(it)
return tuple(sorted(new))
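# e.g. _combine_tuples((1, 3), [2, 3]) == (1, 2, 3)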
@dataclasses.dataclass(frozen=True)
class WorldData(JsonDataclass):
collected_locations: tuple[int, ...] = ()
uploaded_locations: tuple[int, ...] = ()
latest_message_displayed: int = 0
server_data: WorldServerData | None = None
def extend_collected_location(self, new_indices: Iterable[int]) -> Self:
return dataclasses.replace(
self,
collected_locations=_combine_tuples(self.collected_locations, new_indices),
)
def extend_uploaded_locations(self, new_indices: Iterable[int]) -> Self:
return dataclasses.replace(
self,
uploaded_locations=_combine_tuples(self.uploaded_locations, new_indices),
)
class WorldDatabase(QObject):
_all_data: dict[uuid.UUID, WorldData]
_persist_path: Path
WorldDataUpdate = Signal()
def __init__(self, persist_path: Path):
super().__init__()
self.logger = logging.getLogger(__name__)
persist_path.mkdir(parents=True, exist_ok=True)
self._persist_path = persist_path
self.logger.info("Using %s as database path", persist_path)
self._all_data = {}
self._lock = asyncio.Lock()
async def _read_data(self, uid: uuid.UUID) -> WorldData:
raw_data = await json_lib.read_path_async(self._persist_path.joinpath(f"{uid}.json"))
return WorldData.from_json(migrate_to_current(raw_data)["data"])
async def _write_data(self, uid: uuid.UUID, data: WorldData):
json_lib.write_path(
self._persist_path.joinpath(f"{uid}.json"),
{
"schema_version": CURRENT_VERSION,
"data": data.as_json,
},
)
async def load_existing_data(self):
for f in self._persist_path.glob("*.json"):
try:
uid = uuid.UUID(f.stem)
except ValueError:
self.logger.warning("File name is not a UUID: %s", f)
continue
if uid != INVALID_UUID:
self._all_data[uid] = await self._read_data(uid)
def get_data_for(self, uid: uuid.UUID) -> WorldData:
if uid == INVALID_UUID:
raise ValueError("UID not allowed for Multiworld")
if uid not in self._all_data:
self._all_data[uid] = WorldData()
return self._all_data[uid]
async def set_data_for(self, uid: uuid.UUID, data: WorldData):
await self.set_many_data({uid: data})
async def set_many_data(self, new_data: dict[uuid.UUID, WorldData]):
async with self._lock:
for uid, data in new_data.items():
if data != self._all_data.get(uid):
self._all_data[uid] = data
await self._write_data(uid, data)
self.WorldDataUpdate.emit()
def get_locations_to_upload(self, uid: uuid.UUID) -> tuple[int, ...]:
data = self.get_data_for(uid)
return tuple(i for i in sorted(data.collected_locations) if i not in data.uploaded_locations)
def all_known_data(self) -> Iterable[uuid.UUID]:
yield from self._all_data.keys()
|
randovania/randovania
|
randovania/interface_common/world_database.py
|
world_database.py
|
py
| 4,141 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "randovania.lib.migration_lib.get_version",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "randovania.lib.migration_lib",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "randovania.lib.migration_lib.apply_migrations",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "randovania.lib.migration_lib",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.json_dataclass.JsonDataclass",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "collections.abc.Iterable",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.json_dataclass.JsonDataclass",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterable",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "dataclasses.replace",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "typing.Self",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterable",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "dataclasses.replace",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "typing.Self",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QObject",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.Signal",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "asyncio.Lock",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "uuid.UUID",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "randovania.lib.json_lib.read_path_async",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "randovania.lib.json_lib",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "randovania.lib.json_lib.write_path",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "randovania.lib.json_lib",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "randovania.interface_common.players_configuration.INVALID_UUID",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "randovania.interface_common.players_configuration.INVALID_UUID",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "uuid.UUID",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "uuid.UUID",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "collections.abc.Iterable",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 128,
"usage_type": "attribute"
}
] |
39627693477
|
# __author__ = "Chang Gao"
# __copyright__ = "Copyright 2018 to the author"
# __license__ = "Private"
# __version__ = "0.1.0"
# __maintainer__ = "Chang Gao"
# __email__ = "[email protected]"
# __status__ = "Prototype"
import sys
import os
import torch as t
import torch.nn.functional as F
from torch.autograd.function import Function
import time
import math
import numpy as np
def save_normalization(save_path, tr_mean, tr_std, lab_mean, lab_std):
fn_base = os.path.splitext(save_path)[0]
print("\nSaving normalization parameters to " + str(fn_base)+'-XX.pt')
norm = {
'tr_mean': tr_mean,
'tr_std': tr_std,
'lab_mean': lab_mean,
'lab_std': lab_std,
}
t.save(norm, str(fn_base+'-norm.pt'))
def load_normalization(save_path):
fn_base = os.path.splitext(save_path)[0]
print("\nLoading normalization parameters from ", str(fn_base))
norm = t.load(fn_base+'-norm.pt')
return norm['tr_mean'], norm['tr_std'], norm['lab_mean'], norm['lab_std']
# print command line (maybe to use in a script)
def print_commandline(parser):
args = parser.parse_args()
print('Command line:')
print('python '+os.path.basename(sys.argv[0]), end=' ')
for arg in vars(args):
print('--' + str(arg) + ' "' + str(getattr(args, arg))+'"', end=' ')
print()
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def to_sparse(x):
""" converts dense tensor x to sparse format """
x_typename = t.typename(x).split('.')[-1]
sparse_tensortype = getattr(t.sparse, x_typename)
indices = t.nonzero(x)
    if indices.nelement() == 0:  # if all elements are zeros
        return sparse_tensortype(*x.shape)
    else:
        indices = indices.t()
        values = x[tuple(indices[i] for i in range(indices.shape[0]))]
return sparse_tensortype(indices, values, x.size())
def quantizeTensor(x, m, n, en):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
if en == 0:
return x
power = 2. ** n
clip_val = 2. ** (m + n - 1)
value = t.round(x * power)
# value = GradPreserveRoundOp.apply(x * power) # rounding
value = t.clamp(value, -clip_val, clip_val - 1) # saturation arithmetic
value = value / power
return value
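# Worked example: with m=1, n=7 (a Q1.7 format), 0.3 -> round(0.3 * 128) / 128 = 0.296875,
# and outputs saturate to the range [-1.0, 127/128].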
def quantize_rnn(net, qi, qf, en):
for name, param in net.named_parameters():
if 'rnn' in name:
param.data = quantizeTensor(param.data, qi, qf, en)
return net
def pruneTensor(x, alpha):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
n_neuron = x.size(0)
n_input = x.size(1)
prune_prob_mask = t.exp(-alpha * t.unsqueeze(t.arange(0, n_neuron), dim=1).repeat(1, n_input).float()).cuda()
prune_rand_mask = t.rand(n_neuron, n_input).cuda()
prune_mask = prune_rand_mask.masked_fill_(prune_rand_mask > prune_prob_mask, 1)
prune_mask = prune_mask.masked_fill_(prune_rand_mask <= prune_prob_mask, 0)
_, indices = t.sort(t.abs(x), 0)
# print("indices shape", indices.size())
# print("prune_mask shape", prune_mask.size())
# print("x shape", x.size())
for j in range(0, n_input):
x[indices[:, j], j] *= prune_mask[:, j]
return x
def targetedDropout(x, gamma, alpha, epoch):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
t.manual_seed(epoch)
t.cuda.manual_seed_all(epoch)
n_elements = x.numel()
drop_part = round(n_elements * gamma)
weight_vec = x.view(-1)
weight_vec_abs = t.abs(weight_vec)
sorted, indices = t.sort(weight_vec_abs)
# print(sorted)
drop_indices = indices[0:drop_part]
drop_rand_mask = t.rand(drop_indices.size(0)).cuda()
drop_mask = t.ones(drop_indices.size(0)).cuda()
drop_mask = drop_mask.masked_fill_(drop_rand_mask <= alpha, 0)
weight_vec[drop_indices] *= drop_mask
    weight = t.reshape(weight_vec, x.shape)
return weight
def alignedTargetedDropout(x, gamma, alpha, num_pe, epoch):
"""
:param x: input tensor
:param m: number of integer bits before the decimal point
:param n: number of fraction bits after the decimal point
:return: tensor quantized to fixed-point precision
"""
n_rows = x.shape[0]
n_cols = x.shape[1]
    # split the rows into num_pe interleaved groups and prune each column of each
    # group; assign the result back, since advanced indexing returns a copy
    for i in range(0, num_pe):
        for j in range(0, n_cols):
            rows = np.arange(i, n_rows, num_pe)
            x[rows, j] = targetedDropout(x[rows, j], gamma, alpha, epoch)
    return x
class GradPreserveRoundOp(Function):
# Note that both forward and backward are @staticmethods
@staticmethod
def forward(ctx, input):
output = t.round(input)
return output
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
grad_input = grad_output
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
# print(grad_output.size())
# if not t.equal(grad_output, QuantizeT(grad_output, dW_qp)): print("grad_output not quantized")
if ctx.needs_input_grad[0]:
grad_input = grad_output
# Return same number of parameters as "def forward(...)"
return grad_input
class GradPreserveThreshold(Function):
# Note that both forward and backward are @staticmethods
@staticmethod
def forward(ctx, input, threshold, value):
output = F.threshold(input, threshold, value)
return output
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
grad_input = grad_output
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
# print(grad_output.size())
# if not t.equal(grad_output, QuantizeT(grad_output, dW_qp)): print("grad_output not quantized")
if ctx.needs_input_grad[0]:
grad_input = grad_output
# Return same number of parameters as "def forward(...)"
return grad_input
def look_ahead_seq(seq_in, t_width=16, padding=0, batch_first=0):
# Convert input sequence to batch first shape (seq_len, n_batch, n_feature)
seq = seq_in
if batch_first:
seq = seq_in.transpose(0, 1)
seq_len = seq.size(0)
n_batch = seq.size(1)
n_feature = seq.size(2)
# int(t.ceil(float(seq_len)/float(t_width)))
new_seq = []
for i in range(0, seq_len):
if i < seq_len - t_width:
seq_block = seq[i:i + t_width, :, :]
else:
seq_block = seq[i:, :, :]
seq_block_pad = t.zeros([t_width - (seq_len - i), n_batch, n_feature], dtype=t.float32).cuda()
seq_block = t.cat((seq_block, seq_block_pad), 0)
new_seq.append(seq_block)
new_seq = t.stack(new_seq, 0)
new_seq = new_seq.transpose(1, 2)
new_seq = new_seq.transpose(0, 1)
new_seq = new_seq.transpose(2, 3)
return new_seq
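# Shape note (added comment): for input of shape (seq_len, n_batch, n_feature),
# look_ahead_seq returns a tensor of shape (n_batch, seq_len, n_feature, t_width)
# after the stack and the three transposes above.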
def look_around_seq(seq_in, t_width=16, padding=0, batch_first=0):
# Convert input sequence to batch first shape (seq_len, n_batch, n_feature)
seq = seq_in
if batch_first:
seq = seq_in.transpose(0, 1)
seq_len = seq.size(0)
n_batch = seq.size(1)
n_feature = seq.size(2)
# int(t.ceil(float(seq_len)/float(t_width)))
new_seq = []
for i in range(0, seq_len):
if i >= seq_len - t_width:
seq_block = seq[i - t_width:, :, :]
seq_block_pad = t.zeros([t_width - (seq_len - i) + 1, n_batch, n_feature], dtype=t.float32).cuda()
seq_block = t.cat((seq_block, seq_block_pad), 0)
elif i < t_width:
seq_block = seq[0:i + 1 + t_width, :, :]
seq_block_pad = t.zeros([t_width - i, n_batch, n_feature], dtype=t.float32).cuda()
seq_block = t.cat((seq_block, seq_block_pad), 0)
else:
seq_block = seq[i - t_width:i + 1 + t_width, :, :]
# print(seq_block.size())
new_seq.append(seq_block)
new_seq = t.stack(new_seq, 0)
new_seq = new_seq.transpose(1, 2)
new_seq = new_seq.transpose(0, 1)
new_seq = new_seq.transpose(2, 3)
return new_seq
def get_temporal_sparsity(list_layer, seq_len, threshold):
# Evaluate Sparsity
num_zeros = 0
num_elems = 0
# print(seq_len.size())
# Iterate through layers
for layer in list_layer:
all_delta_vec = layer.transpose(0, 1)
all_delta_vec = t.abs(all_delta_vec) # Take absolute values of all delta vector elements
for i, delta_vec in enumerate(all_delta_vec):
seq = delta_vec[:seq_len[i], :]
zero_mask = seq < threshold
num_zeros += t.sum(zero_mask)
num_elems += t.numel(zero_mask)
sparsity = float(num_zeros) / float(num_elems)
return sparsity
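# --- Minimal usage sketch (illustrative addition, not part of the original file).
# It assumes this module's aliases (torch imported as t) and, for targetedDropout,
# a CUDA device, since that function calls .cuda() internally.
if __name__ == "__main__":
    x = t.tensor([0.4, 1.6, -2.3], requires_grad=True)
    # Straight-through rounding: forward rounds, backward passes gradients through
    y = GradPreserveRoundOp.apply(x)
    y.sum().backward()
    print(y, x.grad)  # rounded values; gradient of ones
    if t.cuda.is_available():
        w = t.randn(8, 4).cuda()
        # Zero out, with probability 0.75, the half of w with smallest magnitudes
        w = targetedDropout(w, gamma=0.5, alpha=0.75, epoch=0)
        print(w)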
|
SensorsINI/DeltaGRU-cartpole
|
modules/util.py
|
util.py
|
py
| 10,645 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.splitext",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.typename",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.sparse",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "torch.nonzero",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.round",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.sort",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "torch.abs",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.sort",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.reshape",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.autograd.function.Function",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "torch.round",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.autograd.function.Function",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.threshold",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.numel",
"line_number": 293,
"usage_type": "call"
}
] |
36568700730
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo import exceptions
from odoo.exceptions import ValidationError
import json
import datetime
import string
import requests
from datetime import date
import logging
_logger = logging.getLogger(__name__)
class hr_report(models.Model):
_name = 'hr_report'
_description = 'HR Reports'
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'id DESC'
name = fields.Selection([('Statement Letter','Statement Letter'),('HR Letter','HR Letter')], string="Document", index=True, required=True, tracking=True)
x_employee_id = fields.Many2one('hr.employee', string="Employee", store=True, required=True, tracking=True, index=True)
state = fields.Selection([
('Draft', 'Draft'),('Submit', 'Submit'),('Completed', 'Completed')
], string='Report State' ,default='Draft', index=True, tracking=True)
active = fields.Boolean(string='Active',index=True,default=True)
def submit_report(self):
for _rec in self:
_rec.state = 'Submit'
def print_document(self):
for _rec in self:
#_view_name = ""
_context = ""
_res_model = ""
_target = "new"
_name = ""
if _rec.name == "Statement Letter":
_name = "Statement Letter"
_res_model = 'hr_statement_document'
_context = {
'default_x_employee_id': _rec.x_employee_id.id,
'default_x_hr_report_id': _rec.id,
}
elif _rec.name == "HR Letter":
_name = "HR Letter"
_res_model = 'hr_letter_document'
_context = {
'default_x_employee_id': _rec.x_employee_id.id,
'default_x_hr_report_id': _rec.id,
}
else:
return False
return {
'name': _name,
'view_type': 'form',
'view_mode': 'form',
'res_model': _res_model,
'type': 'ir.actions.act_window',
'target': _target,
'res_id': False,
'context': _context,
}
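# Illustrative sketch (added; not part of the original model): how these methods
# might be exercised from an Odoo shell, assuming an employee record `emp`
# already exists.
# report = env['hr_report'].create({'name': 'HR Letter', 'x_employee_id': emp.id})
# report.submit_report()            # state -> 'Submit'
# action = report.print_document()  # returns an ir.actions.act_window dict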
|
AMohamed389/airport4
|
hr_extend_minds/models/hr_report.py
|
hr_report.py
|
py
| 2,381 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Boolean",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 28,
"usage_type": "name"
}
] |
301052917
|
import yaml
import glob
import dropbox
import os
import sys
import time, threading
import RPi.GPIO as GPIO
import time
import pygame
import sentry_sdk
from sentry_sdk import start_transaction
def loadConfig(file):
with open(file, 'r') as stream:
config_dict = yaml.safe_load(stream)
return config_dict
def clearCache(path):
files = glob.glob(path + '/*')
for f in files:
os.remove(f)
def fetchAndCacheSoundtrack(dropboxAccessToken, toPath, fromPath):
with start_transaction(op="task", name="fetchAndCacheSoundtrack"):
with dropbox.Dropbox(dropboxAccessToken) as dbx:
            # List available files
files = dbx.files_list_folder(path='/' + fromPath, include_non_downloadable_files=False)
if len(files.entries) <= 0:
raise Exception('No files found')
# Select the last file in the folder
fileToFetch = files.entries[-1]
print(fileToFetch)
_, res = dbx.files_download(path=fileToFetch.path_lower)
# Cache the fetched file
_, extension = os.path.splitext(fileToFetch.name)
cachedFilePath = toPath + '/' + fromPath + '_music' + extension
with open(cachedFilePath, 'wb') as f:
f.write(res.content)
print('Soundtrack cached', cachedFilePath)
def configureGPIPTrigger(gpio_pin, cb):
GPIO.setup(gpio_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(gpio_pin, GPIO.RISING, callback=cb, bouncetime=500)
config = loadConfig('config.yaml')
print('Config loaded')
sentry_sdk.init(
dsn=config['sentry'],
environment=config['sentry_env'],
ignore_errors=[KeyboardInterrupt],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0
)
cachePath = config['cache_path']
clearCache(cachePath)
try:
fetchAndCacheSoundtrack(config['dropbox_access_token'], cachePath, 'up')
except:
print('No up file found')
try:
fetchAndCacheSoundtrack(config['dropbox_access_token'], cachePath, 'down')
except:
print('No down file found')
cachedUpFiles = glob.glob(cachePath + '/up_*')
cachedDownFiles = glob.glob(cachePath + '/down_*')
music_file_up = None
music_file_down = None
if len(cachedUpFiles) > 0:
music_file_up = cachedUpFiles[-1]
if len(cachedDownFiles) > 0:
music_file_down = cachedDownFiles[-1]
print('Ready using cached soundtrack up/down', music_file_up, music_file_down)
# Configure GPIO
pin_up = config['pi_signal_gpio_up']
pin_down = config['pi_signal_gpio_down']
pin_check_interval = config['pi_signal_interval_ms']
# Configure pygame mixer
pygame.mixer.init()
pygame.mixer.music.set_volume(1.0)
fade_ms = 1000
max_music_play_seconds = int(config['soundtrack_play_seconds'])
def stop_music():
print("Fading out music for", fade_ms, "ms")
pygame.mixer.music.fadeout(fade_ms)
pygame.mixer.music.unload()
def play_music(gpio_trigger):
print("Play music for trigger", gpio_trigger)
is_music_playing = pygame.mixer.music.get_busy()
if is_music_playing:
print("Music already playing")
return
is_pin_up = gpio_trigger == pin_up
selected_music = (music_file_up, music_file_down)[is_pin_up]
if selected_music == None:
print('No music to play')
return
print("Playing music for", max_music_play_seconds, "seconds", selected_music)
pygame.mixer.music.load(selected_music)
pygame.mixer.music.play(fade_ms=fade_ms)
threading.Timer(max_music_play_seconds, stop_music).start()
GPIO.setmode(GPIO.BCM)
configureGPIPTrigger(pin_up, play_music)
configureGPIPTrigger(pin_down, play_music)
print('Listening to signal on GPIO pins', pin_up, pin_down)
try:
running = True
while running:
time.sleep(1)
except:
print("quitting")
pygame.quit()
GPIO.cleanup()
sys.exit()
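# Illustrative note (added; not part of the original script): a rising edge on
# either configured pin invokes play_music(pin_number) via the GPIO callbacks
# above, so a manual test without hardware could simply call, e.g.:
# play_music(pin_up)   # plays the corresponding cached soundtrack, if any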
|
soundtecas/elevator
|
elevator.py
|
elevator.py
|
py
| 3,991 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "yaml.safe_load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sentry_sdk.start_transaction",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dropbox.Dropbox",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.IN",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.PUD_UP",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.add_event_detect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.RISING",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "sentry_sdk.init",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.set_volume",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.fadeout",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.unload",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.get_busy",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.setmode",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BCM",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pygame.quit",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.cleanup",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 141,
"usage_type": "call"
}
] |
24255674964
|
from selenium import webdriver
import csv
import config
import time
class instaInfo:
def __init__(self):
"""
init webdriver
"""
self.driver = webdriver.Chrome('chromedriver')
self.profile_url = ''
self.followers_count = 0
self.ask_url()
def ask_url(self):
"""
get Instagram profile url
"""
self.profile_url = input("Enter Instagram profile link: ")
if self.profile_url[:26] != 'https://www.instagram.com/':
print('Link must be like \'https://www.instagram.com/user_name/\'')
            return self.ask_url()
def login_to_instagram(self):
"""
connect and login to Instagram
"""
try:
# connect to Instagram login page
self.driver.get('https://www.instagram.com/accounts/login/')
except Exception as e:
exit(f"Can't connect to: 'https://www.instagram.com/accounts/login/'\nError:{e}")
time.sleep(2)
try:
# input login and password
self.driver.find_element_by_name('username').send_keys(config.INSTAGRAM_LOGIN)
self.driver.find_element_by_name('password').send_keys(config.INSTAGRAM_PASSWORD)
# click to login button
self.driver.find_element_by_css_selector('button.sqdOP.L3NKy.y3zKF').click()
except Exception as e:
exit(f"Can't login!\nError:{e}")
time.sleep(3)
try:
# click save data button
self.driver.find_element_by_css_selector('button.sqdOP.L3NKy.y3zKF').click()
except Exception as e:
exit(f"Can't click !\nError:{e}")
time.sleep(3)
print("Logged in Instagram")
def connect_to_profile(self):
"""
connect to Instagram profile
"""
try:
self.driver.get(self.profile_url)
except Exception as e:
exit(f"Can't connect to: {self.profile_url}\nError:{e}")
time.sleep(3)
print(f"Connected to profile: {self.profile_url}")
def get_followers_count(self):
"""
parse count of followers
"""
try:
self.followers_count = self.driver.find_elements_by_css_selector('span.g47SY')[1].get_attribute('title')
# replace blank and convert to int type
self.followers_count = int(self.followers_count.replace(' ', ''))
except Exception as e:
exit(f"Can't get followers count: {self.profile_url}\nError:{e}")
print(f"{self.profile_url} count of followers: {self.followers_count}")
def get_profile_followers(self):
"""
get followers info
"""
# click to followers button
self.driver.find_element_by_css_selector('a.-nal3').click()
time.sleep(3)
# load all followers
last_element = ''
while last_element != self.driver.find_elements_by_css_selector('a.FPmhX.notranslate._0imsa')[-1]:
last_element = self.driver.find_elements_by_css_selector('a.FPmhX.notranslate._0imsa')[-1]
self.driver.execute_script('arguments[0].scrollIntoView(true);', last_element)
time.sleep(1)
# get links to followers
followers_link = [follower.get_attribute('href') for follower in self.driver.find_elements_by_css_selector('a.FPmhX.notranslate._0imsa')]
for follower_link in followers_link:
# connect to follower profile
self.profile_url = follower_link
            self.connect_to_profile()
# get count of followers
self.get_followers_count()
# write to csv
self.append_to_csv()
def append_to_csv(self):
"""
write profile row and followers count into csv file
"""
with open('instagramInfo.csv', mode='a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=';')
writer.writerow([self.profile_url, self.followers_count])
if __name__ == "__main__":
instagram_info_obj = instaInfo()
instagram_info_obj.login_to_instagram()
instagram_info_obj.connect_to_profile()
instagram_info_obj.get_profile_followers()
instagram_info_obj.driver.quit()
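# Illustrative alternative (added; not part of the original file): explicit waits
# are usually more robust than fixed time.sleep() calls. Assuming the same page
# elements, the login field could be awaited like this:
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.common.by import By
# WebDriverWait(driver, 10).until(
#     EC.presence_of_element_located((By.NAME, 'username')))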
|
bfesiuk/InstagramInfo
|
info.py
|
info.py
|
py
| 4,293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "config.INSTAGRAM_LOGIN",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "config.INSTAGRAM_PASSWORD",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 115,
"usage_type": "call"
}
] |
37340652783
|
from input import Input
from word import Word
from data import Data
from display import Display as dp
from colorama import Fore
import numpy as np
# The Game Object
class LeWord:
# Only gets the mode,
def __init__(self, mode):
self.mode = mode
# The joker of a word is the count of vowels and consonants in it.
def joker(self, true_word, mode=1):
vowels = "AEIOUaeiou"
# checks the mode
if mode == 1:
vowel_c = 0
consonant_c = 0
            # for every letter, increment the vowel count if it is a vowel,
            # otherwise increment the consonant count
for letter in true_word:
if letter in vowels:
vowel_c += 1
continue
consonant_c += 1
# then displays the joker for the word
dp.display_joker(vowel_c, consonant_c, mode=mode)
else:
            # if mode is not 1, repeat the same process for every word
for index, word in enumerate(true_word):
vowel_c = 0
consonant_c = 0
                # for every letter, increment the vowel count if it is a vowel,
                # otherwise increment the consonant count
for letter in word:
if letter in vowels:
vowel_c += 1
continue
consonant_c += 1
# then displays the joker for the word
dp.display_joker(vowel_c, consonant_c, mode=mode, que=index+1)
print()
# starts the game with selected mode
def play_game(self, again):
if self.mode[2] in ["1", "one"]:
self.single_word_game()
else:
self.multi_word_game()
# the game with one-single word
def single_word_game(self):
# Picks the random word
true_word = Word.pick_random_word(self.mode[1])
# Turn true word into array of individual letters and indexes using numpy library
true_arr = np.array(list(true_word.upper()))
# Turn the array into [item id found] type
true_idx = [[item, idx, None] for idx, item in enumerate(true_arr)]
attempt = 0
guesses = []
# while the attempt count is less than the word length + 1
while attempt < self.mode[1] + 1:
# gets the guess from the user
guess_word = Input.ask_user_input(self.mode[1], attempt)
# looks for special inputs
if not guess_word:
break
elif guess_word == "JOKER":
self.joker(true_word, mode=self.mode[3])
continue
else:
# then appends the guess
guesses.append(guess_word)
# Turn guess into array of individual letters and indexes using numpy library
guess_arr = np.array(list(guess_word))
# Turn the array into [item id found] type
guess_idx = [[item, idx, None] for idx, item in enumerate(guess_arr)]
matched = []
existing = []
                # uses numpy to find the positions where the true and guess arrays match
matching = np.where(true_arr == guess_arr)[0]
                # for each matching position, set 'found' to 'YES' in both arrays
for item in matching:
matched.append(guess_idx[item][0])
guess_idx[item][2], true_idx[item][2] = 'YES', 'YES'
                # collects the letters that have not been found yet
rem_guess = [item for item in guess_idx if item[2] != 'YES']
rem_true = [item for item in true_idx if item[2] != 'YES']
                # for the letters not yet found, checks whether they exist elsewhere
                # in the true word; if so, marks them as 'EX' (exists)
for guess in rem_guess:
for true in rem_true:
if guess[0] == true[0]:
if list(true_arr).count(guess[0]) > (matched.count(guess[0]) + existing.count(guess[0])):
existing.append(guess[0])
guess[2], true[2] = 'EX', 'EX'
else:
continue
# Colors and marks letters based on match, exist, or not exist
final = Word.mark_letters(guess_idx)
                # pad each marked letter with a trailing space so the table columns align
                final = [letter + " " for letter in final]
# Turn current guess into table
guess_df = Word.build_df(attempt, final, attempt)
print(guess_df)
# checks whether all the letters are found or not.
if guess_word.lower() == true_word.lower():
                    # if so, displays the congratulations message
dp.display_cong(1, true_word=true_word.upper())
                    # and asks whether the user wants to play again
again = True if input(Fore.RED + "-->" + Fore.RESET).lower() == "y" else False
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses,
True if len(true_word) > attempt + 1 else False])
return again
else:
                    # otherwise increments the attempt count and continues
attempt += 1
# if attempt count is at limit then
if attempt == self.mode[1] + 1:
# gives the fail message
dp.display_fail(1, true_word)
                        # and asks whether the user wants to play again
again = True if input(Fore.RED + "-->" + Fore.RESET).lower() == "y" else False
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses,
True if len(true_word) > attempt + 1 else False])
return again
def multi_word_game(self):
founded_words = []
# Picks the random words
true_word = Word.pick_random_word(self.mode[1], int(self.mode[3]))
# Turn true words into array of individual letters and indexes using numpy library
true_arr = np.array(list([j for j in i.lower()] for i in true_word))
# Turn the array into [item id found] type
true_idx = [[[item, idx, None] for idx, item in enumerate(element)] for element in true_arr]
attempt = 0
guesses_word = []
# while the attempt count is less than the word length + 5
while attempt < self.mode[1] + 5:
# gets the guess from the user
guess_word = Input.ask_user_input(self.mode[1], attempt)
# looks for special inputs
if not guess_word:
break
elif guess_word == "JOKER":
self.joker(true_word, mode=self.mode[3])
continue
else:
# then appends the guess
guesses_word.append(guess_word)
# Turn guess into array of individual letters and indexes using numpy library
guess_arr = np.array(list(guess_word.lower()))
# Turn the array into [item id found] type
guess_idx = [[[item, idx, None] for idx, item in enumerate(guess_arr)] for i in range(int(self.mode[3]))]
matched = []
existing = []
matches = []
# for every word in true words
for element in true_arr:
# uses numpy to look if any matches between true and guess array pairs then appends to 'matched'
matches.append(np.where(element == guess_arr)[0])
                # checks whether any words were found earlier; if so, keeps them marked
for founds in founded_words:
index, word = founds[0], founds[1]
guess_idx[index] = [[item, idx, 'YES'] for idx, item in enumerate(word)]
                # for each matching position, set 'found' to 'YES' in both arrays
for index, match in enumerate(matches):
for item in match:
matched.append([index, guess_idx[index][item][0]])
guess_idx[index][item][2], true_idx[index][item][2] = 'YES', 'YES'
                # collects the letters that have not been found yet
rem_guess = []
for element in guess_idx:
word = []
for item in element:
if item[2] != 'YES':
word.append(item)
rem_guess.append(word)
                # collects the letters that have not been found yet
rem_true = []
for element in true_idx:
word = []
for item in element:
if item[2] != 'YES':
word.append(item)
rem_true.append(word)
                # for the letters not yet found, checks whether they exist elsewhere
                # in the true word; if so, marks them as 'EX' (exists)
for index_g, guesses in enumerate(rem_guess):
for guess in guesses:
for index, word in enumerate(rem_true):
for true in word:
if index_g == index:
if guess[0] == true[0]:
if list(true_arr[index]).count(guess[0]) > \
(matched.count(guess[0]) + existing.count(guess[0])):
existing.append(guess[0])
guess[2], true[2] = 'EX', 'EX'
else:
continue
# Colors and marks letters based on match, exist, or not exist
final = Word.mark_letters(guess_idx, mode=self.mode[3])
                # pad each marked letter with a trailing space so the table columns align
                final = [letter + " " for letter in final]
# Turn current guess into table
guess_df = Word.build_df(attempt, final, attempt, mode=int(self.mode[3]))
print(guess_df)
                # if any word has been found, add it to founded_words
for index, word in enumerate(true_word):
if guess_word.lower() == word.lower():
founded_words.append([index, word])
# checks whether all the letters are found or not.
if len(true_word) == len(founded_words):
                    # if so, displays the congratulations message
dp.display_cong(len(true_word), true_word=true_word)
                    # and asks whether the user wants to play again
again = True if input(Fore.RED + "-->" + Fore.RESET).lower() == "y" else False
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses_word,
True if len(true_word) > attempt + 1 else False])
return again
else:
                    # otherwise increments the attempt count and continues
attempt += 1
# if attempt count is at limit then
if attempt == self.mode[1] + 5:
# gives the fail message
dp.display_fail(len(true_word), true_word)
                        # and asks whether the user wants to play again
again = True if input(Fore.RED + "-->" + Fore.RESET).lower() == "y" else False
imp = Input.check_input(str(again))
if imp == "m":
return False
print()
# finally writes the data into the csv file
Data.write_game_data([self.mode[2], true_word, len(true_word), attempt + 1, guesses_word,
True if len(true_word) > attempt + 1 else False])
return again
# Starts the LE-WORD
@staticmethod
def start_le_word():
opening = True
again = False
# Displays the menu for user to choose
mode = dp.display_menu()
# Until the user types mode 'quit' the game continues
while mode[0] != "quit":
            # if it is not the first launch and not a replay, print the menu again
if not opening and not again:
mode = dp.display_menu()
if mode[0] == "quit":
break
# if choice statics displays statics
if mode[0] == "statics":
dp.display_statics()
# if choice rules displays rules
elif mode[0] == "rules":
dp.display_rules()
opening = False
# Creates a LeWord object
game = LeWord(mode)
# if the mode is play
if mode[0] == "play":
# then looks at the game mode
if mode[2] in ["one", "1"]:
# if single then single_word_game
again = game.single_word_game()
elif mode[2] == "multi":
# if multi then multi_word_game
again = game.multi_word_game()
            # after the game ends, if the user wants to play again
if again:
                # asks the user which mode to play in
a, b, c = Input.ask_mode()
if not mode:
opening = False
again = False
# assigns the mode and continues
mode = ["play", a, b, c]
|
mburaozkan/LeWord-The-Word-Game
|
game.py
|
game.py
|
py
| 14,751 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "display.Display.display_joker",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "display.Display.display_joker",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "word.Word.pick_random_word",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "word.Word",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "input.Input.ask_user_input",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "word.Word.mark_letters",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "word.Word",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "word.Word.build_df",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "word.Word",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "display.Display.display_cong",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "input.Input.check_input",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "data.Data.write_game_data",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "data.Data",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "display.Display.display_fail",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "input.Input.check_input",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "data.Data.write_game_data",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "data.Data",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "word.Word.pick_random_word",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "word.Word",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "input.Input.ask_user_input",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "word.append",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "word.append",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "word.Word.mark_letters",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "word.Word",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "word.Word.build_df",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "word.Word",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "word.lower",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "display.Display.display_cong",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "input.Input.check_input",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "data.Data.write_game_data",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "data.Data",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "display.Display.display_fail",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "input.Input.check_input",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "data.Data.write_game_data",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "data.Data",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "display.Display.display_menu",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "display.Display.display_menu",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "display.Display.display_statics",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "display.Display.display_rules",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "display.Display",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "input.Input.ask_mode",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "input.Input",
"line_number": 349,
"usage_type": "name"
}
] |
34338376065
|
# -*- coding: utf-8 -*-
import contextlib
import json
import logging
import re
import starlette_werkzeug_debugger
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import JSONResponse
from starlette.routing import Route
from starlette.testclient import TestClient
def inner_error():
local_var = 'inner'
raise RuntimeError("Raised error")
async def raise_error(request):
local_var = 'outer'
inner_error()
async def ok_response(request):
return JSONResponse("ok")
@contextlib.asynccontextmanager
async def lifespan(app):
# only to test scope["type"]
yield
def build_app(**kwargs):
middleware = [
Middleware(starlette_werkzeug_debugger.WerkzeugDebugMiddleware, **kwargs)
]
return Starlette(debug=True, middleware=middleware, lifespan=lifespan, routes=[
Route('/', raise_error),
Route('/ok/', ok_response),
])
def get_middleware(app):
return app.middleware_stack.app
def test_correct_response():
app = build_app()
with TestClient(app) as client:
response = client.get('/ok/')
assert response.status_code == 200
assert response.content == b'"ok"'
def test_error_response():
app = build_app()
client = TestClient(app)
response = client.get('/')
assert response.status_code == 500
assert b"Werkzeug Debugger" in response.content
def test_serve_static():
app = build_app()
client = TestClient(app)
client.get('/')
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'resource', 'f': 'style.css'})
assert response.status_code == 200
assert response.headers['content-type'].startswith('text/css')
def test_printpin(caplog):
caplog.set_level(logging.INFO)
app = build_app(evalex=True, pin_security=True, pin_logging=True)
client = TestClient(app)
client.get('/')
middleware = get_middleware(app)
middleware.pin = '4852'
    # with a wrong secret, the PIN must not be printed
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'printpin', 's': middleware.secret + 'x'})
assert middleware.pin not in caplog.text
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'printpin', 's': middleware.secret})
assert middleware.pin in caplog.text
def test_pinauth():
app = build_app(evalex=True, pin_security=True, pin_logging=True)
client = TestClient(app)
client.get('/')
middleware = get_middleware(app)
middleware.pin = '4852'
# wrong secret
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin, 's': middleware.secret + 'x'})
assert response.status_code == 500
# wrong pin
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin + '5', 's': middleware.secret})
assert response.status_code == 200
response_content = json.loads(response.content.decode('utf-8'))
assert not response_content['auth']
# correct pin
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin, 's': middleware.secret})
assert response.status_code == 200
response_content = json.loads(response.content.decode('utf-8'))
assert response_content['auth']
assert middleware.pin_cookie_name in response.cookies
def test_console():
app = build_app(evalex=True, pin_security=True, pin_logging=True)
client = TestClient(app)
exception_content = client.get('/').content.decode('utf-8')
middleware = get_middleware(app)
middleware.pin = '4852'
# login
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'pinauth', 'pin': middleware.pin, 's': middleware.secret})
cookies = response.cookies
frame_ids = re.findall(r'(?:frame-(\d+))', exception_content)
# content from inner variable
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'local_var', 'frm': frame_ids[-1], 's': middleware.secret})
assert b'inner' in response.content
# content from outer variable
response = client.get('/', params={'__debugger__': 'yes', 'cmd': 'local_var', 'frm': frame_ids[-2], 's': middleware.secret})
assert b'outer' in response.content
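# Illustrative sketch (added; not part of the test module): serving the debug app
# directly, assuming uvicorn is installed:
# import uvicorn
# uvicorn.run(build_app(evalex=True), host="127.0.0.1", port=8000)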
|
mireq/starlette-werkzeug-debugger
|
tests/test_debugger.py
|
test_debugger.py
|
py
| 4,009 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "starlette.responses.JSONResponse",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "contextlib.asynccontextmanager",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "starlette.middleware.Middleware",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "starlette_werkzeug_debugger.WerkzeugDebugMiddleware",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "starlette.applications.Starlette",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "starlette.routing.Route",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "starlette.routing.Route",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "starlette.testclient.TestClient",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "starlette.testclient.TestClient",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "starlette.testclient.TestClient",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "starlette.testclient.TestClient",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "starlette.testclient.TestClient",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "starlette.testclient.TestClient",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 127,
"usage_type": "call"
}
] |
11313840170
|
import numpy as np
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import string
import tqdm
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
class preprocessing:
    # dimension is the dimensionality of the embedding vectors used
    # embedder is a fasttext object from the gensim library
def __init__(self,dimension,embedder):
self.dimension=dimension
self.embedder=embedder
    # Used to count the occurrences of each word
    # Input is a list of lists of tokens
    # Output is a dictionary mapping each word to its occurrence count
def word_count(self,sentences):
counts = dict()
for sentence in sentences:
for word in sentence:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
    # Filters the data by word frequency, word length, and sentence length
    # Words occurring fewer than `frequency` times are dropped
    # Words with fewer than `word_length` characters are dropped
    # Sentences with fewer than `N_words` words are dropped
    # Output is a list of lists of tokens (features) and a list of strings (labels)
def getFilteredData(self,product_title,labels,frequency, N_words, word_length):
result=self.word_count(product_title)
new_product_title=[]
for sentence in tqdm.tqdm(product_title):
new_product_title.append([word for word in sentence if result[word]>=frequency and len(word)>=word_length])
new_features=[]
new_labels=[]
for index,title in tqdm.tqdm(enumerate(new_product_title)):
if(len(title)>=N_words):
new_features.append(title)
new_labels.append(labels[index])
return new_features,new_labels
    # Produces Tf-Idf features from the input
    # Input is a list of lists of tokens
    # Output: a tf-idf matrix (list of lists of reals), a CountVectorizer object, and a TfidfTransformer object
def getTfIdf(self,new_product_title):
concatenated_product_title=[]
for sentence in tqdm.tqdm(new_product_title):
concatenated_product_title.append(" ".join(sentence))
cv=CountVectorizer()
result=cv.fit_transform(concatenated_product_title)
tftransformer = TfidfTransformer(smooth_idf=False)
final_result=tftransformer.fit_transform(result)
return final_result,cv,tftransformer
    # Removes digits and punctuation, then tokenizes the words
    # Input is a single string
    # Output is a list of tokens
def tokenize(self,input_string):
input_string=''.join(i for i in input_string if not i.isdigit())
result_string=input_string.lower()
target_punctuations=string.punctuation
for punctuation in target_punctuations:
result_string=result_string.replace(punctuation, ' ')
result_string=result_string.strip(' ').split()
return result_string
    # Converts a word into its fasttext vector
    # Input is a token
    # Output is a vector of dimension self.dimension (or 0 if the word is unknown)
def vectorize_word(self,product_title):
try:
result=self.embedder[product_title]
except KeyError:
result=0
return result
    # Converts a sentence (list of tokens) into a fasttext vector
    # Word vectors are combined into a sentence vector by summation
    # Each word vector can be weighted by that word's tf-idf value
    # doc_occ is a dictionary mapping a word to the number of documents containing it
    # total_doc is the total number of documents
    # For further background on doc_occ and total_doc, see the tf-idf formula
def vectorize_sentence(self,input_sentence,doc_occ=None,total_doc=None):
        if(False):  # unweighted-sum branch, permanently disabled; the weighted branch below runs
N_success=0
result_vector=np.zeros(self.dimension)
for word in input_sentence:
result_vector+=self.vectorize_word(word)
if(np.sum(self.vectorize_word(word))!=0):
N_success+=1
if(N_success<2):
result_vector=np.zeros(self.dimension)
return result_vector
else:
N_success=0
result_vector=np.zeros(self.dimension)
ll=len(input_sentence)
for word in input_sentence:
c=0
for word2 in input_sentence:
if(word==word2):
c+=1
if(word in list(doc_occ)):
result_vector+=(self.vectorize_word(word)*((c/ll)*(np.log(total_doc/doc_occ[word]))))
else:
result_vector+=(self.vectorize_word(word))
if(np.sum(self.vectorize_word(word))!=0):
N_success+=1
if(N_success<2):
result_vector=np.zeros(self.dimension)
return result_vector
    # Pipeline method that turns raw data into data ready for classification
    # Input: a list of strings (features), a list of strings (labels), and optionally a custom LabelEncoder object
    # Output: a pandas dataframe (features and labels combined) with the column name "Labels" for the labels,
    # integer column names 0 to dimension-1 for the features, and a LabelEncoder object (when no custom encoder is supplied)
def preprocess_data(self,features,labels,encoder=None):
embedded_data=pd.DataFrame()
print("TOKENIZE DATA")
embedded_data["Features"]=[self.tokenize(title) for title in tqdm.tqdm(features)]
print("APPLYING FILTER")
nf,nl=self.getFilteredData(embedded_data["Features"],list(labels),50,2,3)
embedded_data=pd.DataFrame()
embedded_data["Features"]=nf
voc=set()
for sentence in tqdm.tqdm(embedded_data["Features"]):
for word in sentence:
voc.add(word)
total_doc=len(embedded_data["Features"])
doc_occ={}
for element in tqdm.tqdm(list(voc)):
count_occ=0
for sentence in embedded_data["Features"]:
if (element in sentence):
count_occ+=1
doc_occ[element]=count_occ
print("ENCODING LABELS")
if(encoder==None):
label_encoder=LabelEncoder()
embedded_data["Labels"]=label_encoder.fit_transform(nl)
else:
label_encoder=encoder
embedded_data["Labels"]=label_encoder.transform(nl)
print("CONVERTING SENTENCE TO VECTOR")
embedded_data["Features Vector"]=[self.vectorize_sentence(title,doc_occ,total_doc) for title in tqdm.tqdm(embedded_data["Features"])]
print("SAVE VECTOR TO PANDAS DATAFRAME")
for i in tqdm.tqdm(range(self.dimension)):
embedded_data[i]=[value[i] for value in embedded_data["Features Vector"]]
embedded_data = embedded_data[[*range(self.dimension),"Labels"]]
if(encoder==None):
return embedded_data, label_encoder
else:
return embedded_data
    # Input: two lists of strings and the desired number of top-N classes
    # Output: data in the same format as the input, restricted to the top-N classes
def getFilteredClasses(self,product_title,labels,top_N):
print("1/3")
sorted_by_value = sorted(self.class_count(labels).items(), key=lambda kv: kv[1])
valid_class=[value[0] for value in sorted_by_value[-top_N:]]
print("2/3")
product_title=list(product_title)
new_features=[]
new_labels=[]
for index,label in tqdm.tqdm(enumerate(labels)):
if(label in valid_class):
new_labels.append(label)
new_features.append(product_title[index])
return new_features,new_labels
    # Computes the Tf-Idf value of a word
    # Input: the real values needed to compute Tf-Idf
    # Output: the Tf-Idf value
def tfidf_word(self,total_occ,total_words,doc_occ,total_doc):
return (total_occ/total_words)*np.log(total_doc/doc_occ)
    # Produces Tf-Idf features from the input, restricted to the words in vocab
    # Input: a list of lists of tokens and a list of strings (the vocab)
    # Output: a tf-idf matrix (list of lists of reals), a CountVectorizer object, and a TfidfTransformer object
def getTfIdfCustom(self,new_product_title,vocab):
print("1/3")
concatenated_product_title=[]
for sentence in tqdm.tqdm(new_product_title):
concatenated_product_title.append(" ".join(sentence))
print("2/3")
cv=CountVectorizer(vocabulary=vocab)
result=cv.fit_transform(concatenated_product_title)
print("3/3")
tftransformer = TfidfTransformer(smooth_idf=False)
final_result=tftransformer.fit_transform(result)
return final_result,cv,tftransformer
    # Counts the occurrence frequency of every word in a list of lists of tokens
    # Output is a dictionary of words and their occurrence counts
def word_count(self,sentences):
counts = dict()
print("1/1")
for sentence in sentences:
for word in sentence:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
    # Counts the occurrence frequency of every item in a list of tokens
    # Output is a dictionary of items and their occurrence counts
def class_count(self,words):
counts = dict()
for word in words:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
    # Same as word_count, but only counts data belonging to the target class
def word_count_label(self,sentences,labels,target):
counts = dict()
print("1/1")
for index,sentence in enumerate(sentences):
if(labels[index]==target):
for word in sentence:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
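# Illustrative usage sketch (added; not part of the original file), assuming a
# trained gensim FastText model whose word vectors support __getitem__ lookup:
# from gensim.models import FastText
# model = FastText.load("fasttext.model")   # hypothetical saved model
# pre = preprocessing(dimension=100, embedder=model.wv)
# data, encoder = pre.preprocess_data(titles, labels)   # titles/labels: lists of strings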
|
dryantl/product-title-classification
|
preprocessing_pipeline.py
|
preprocessing_pipeline.py
|
py
| 10,629 |
python
|
id
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tqdm.tqdm",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 209,
"usage_type": "call"
}
] |
25070989865
|
import logging
from django.urls import path
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import Request
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from purplship.server.core.views.api import APIView
from purplship.server.proxy.router import router
from purplship.server.serializers import SerializerDecorator
from purplship.server.core.gateway import Pickups
from purplship.server.core.serializers import (
PickupCancelRequest,
PickupUpdateRequest,
OperationResponse,
PickupResponse,
PickupRequest,
ErrorResponse,
TestFilters,
MODELS,
)
logger = logging.getLogger(__name__)
ENDPOINT_ID = "@" # This endpoint id is used to make operation ids unique make sure not to duplicate
CARRIER_NAMES = list(MODELS.keys())
class PickupDetails(APIView):
@swagger_auto_schema(
tags=['Proxy'],
operation_id=f"{ENDPOINT_ID}schedule_pickup",
operation_summary="Schedule a pickup",
query_serializer=TestFilters(),
request_body=PickupRequest(),
responses={200: PickupResponse(), 400: ErrorResponse()},
manual_parameters=[
openapi.Parameter('carrier_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, enum=CARRIER_NAMES),
],
)
def post(self, request: Request, carrier_name: str):
"""
Schedule one or many parcels pickup
"""
test_filter = SerializerDecorator[TestFilters](data=request.query_params).data
payload = SerializerDecorator[PickupRequest](data=request.data).data
response = Pickups.schedule(payload, context=request, carrier_name=carrier_name, **test_filter)
return Response(PickupResponse(response).data, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
tags=['Proxy'],
operation_id=f"{ENDPOINT_ID}update_pickup",
operation_summary="Update a pickup",
query_serializer=TestFilters(),
request_body=PickupUpdateRequest(),
responses={200: PickupResponse(), 400: ErrorResponse()},
manual_parameters=[
openapi.Parameter('carrier_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, enum=CARRIER_NAMES),
],
)
def put(self, request: Request, carrier_name: str):
"""
Modify a scheduled pickup
"""
test_filter = SerializerDecorator[TestFilters](data=request.query_params).data
payload = SerializerDecorator[PickupUpdateRequest](data=request.data).data
response = Pickups.update(payload, context=request, carrier_name=carrier_name, **test_filter)
return Response(PickupResponse(response).data, status=status.HTTP_200_OK)
class PickupCancel(APIView):
@swagger_auto_schema(
tags=['Proxy'],
operation_id=f"{ENDPOINT_ID}cancel_pickup",
operation_summary="Cancel a pickup",
query_serializer=TestFilters(),
request_body=PickupCancelRequest(),
responses={200: OperationResponse(), 400: ErrorResponse()},
manual_parameters=[
openapi.Parameter('carrier_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, enum=CARRIER_NAMES),
],
)
def post(self, request: Request, carrier_name: str):
"""
Cancel a pickup previously scheduled
"""
test_filter = SerializerDecorator[TestFilters](data=request.query_params).data
payload = SerializerDecorator[PickupCancelRequest](data=request.data).data
response = Pickups.cancel(payload, context=request, carrier_name=carrier_name, **test_filter)
return Response(OperationResponse(response).data, status=status.HTTP_200_OK)
router.urls.append(path('proxy/pickups/<carrier_name>', PickupDetails.as_view(), name="pickup-details"))
router.urls.append(path('proxy/pickups/<carrier_name>/cancel', PickupCancel.as_view(), name="pickup-cancel"))
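# Illustrative request sketch (added; not part of the original module), assuming a
# running server (host and carrier name here are hypothetical) and the `requests` library:
# import requests
# requests.post("http://localhost:8000/proxy/pickups/canadapost",
#               json={...})  # body shaped per PickupRequest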
|
danh91/purplship
|
server/modules/proxy/purplship/server/proxy/views/pickup.py
|
pickup.py
|
py
| 3,927 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.MODELS.keys",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.MODELS",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.views.api.APIView",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.SerializerDecorator",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.serializers.TestFilters",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.SerializerDecorator",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.serializers.PickupRequest",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.gateway.Pickups.schedule",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.gateway.Pickups",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupResponse",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.TestFilters",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupRequest",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupResponse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.ErrorResponse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Parameter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.IN_PATH",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "drf_yasg.openapi.TYPE_STRING",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.SerializerDecorator",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.serializers.TestFilters",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.SerializerDecorator",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.serializers.PickupUpdateRequest",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.gateway.Pickups.update",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.gateway.Pickups",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupResponse",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.TestFilters",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupUpdateRequest",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupResponse",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.ErrorResponse",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Parameter",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.IN_PATH",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "drf_yasg.openapi.TYPE_STRING",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.core.views.api.APIView",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "rest_framework.request.Request",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.SerializerDecorator",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.serializers.TestFilters",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "purplship.server.serializers.SerializerDecorator",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.serializers.PickupCancelRequest",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "purplship.server.core.gateway.Pickups.cancel",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.gateway.Pickups",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.OperationResponse",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.TestFilters",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.PickupCancelRequest",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.OperationResponse",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "purplship.server.core.serializers.ErrorResponse",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Parameter",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.IN_PATH",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "drf_yasg.openapi.TYPE_STRING",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.proxy.router.router.urls.append",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "purplship.server.proxy.router.router.urls",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.proxy.router.router",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "purplship.server.proxy.router.router.urls.append",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "purplship.server.proxy.router.router.urls",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "purplship.server.proxy.router.router",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 102,
"usage_type": "call"
}
] |
12938444153
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from jeopardy.views import (
QuestionList,
QuestionById,
QuestionRandom,
PlayerList,
PlayerByName,
PlayerById,
PlayerQuestionById,
PlayerQuestionByName,
)
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^questions/$', QuestionList.as_view()),
url(r'^questions/id/(?P<pk>[0-9]+)/$', QuestionById.as_view()),
url(r'^questions/random/$', QuestionRandom.as_view()),
url(r'^players/$', PlayerList.as_view()),
url(r'^players/name/(?P<name>.+)/$', PlayerByName.as_view()),
url(r'^players/id/(?P<pk>[0-9]+)/$', PlayerById.as_view()),
url(r'^players/name/(?P<name>.+)/question/(?P<question_id>[0-9]+)/$', PlayerQuestionByName.as_view()),
url(r'^players/id/(?P<player_id>[0-9]+)/question/(?P<question_id>[0-9]+)/$', PlayerQuestionById.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
ryanwholey/jeopardy_bot
|
trabek_bot/jeopardy/urls.py
|
urls.py
|
py
| 982 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.QuestionList.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.QuestionList",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.QuestionById.as_view",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.QuestionById",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.QuestionRandom.as_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.QuestionRandom",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerList.as_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerList",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerByName.as_view",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerByName",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerById.as_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerById",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerQuestionByName.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerQuestionByName",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerQuestionById.as_view",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "jeopardy.views.PlayerQuestionById",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "rest_framework.urlpatterns.format_suffix_patterns",
"line_number": 26,
"usage_type": "call"
}
] |
12864757051
|
import streamlit as st
import pandas as pd
import numpy as np
import re
import json
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import math
import warnings
warnings.filterwarnings('ignore')
from PIL import Image
# Page setup
st.set_page_config(page_title = "Python Tweets Search Engine", page_icon = "🐍", layout = "wide")
st.title("Python Tweets Search Engine")
df = pd.read_csv('preprocessed_data.csv').fillna('')
inverted_index = json.load(open("inverted_index.json"))
# Define a function to tokenize and clean the text
def clean_text(text):
text = re.sub(r"http\S+", "", text) # Remove URLs
text = re.sub(r'[^\w\s]', '', text) # Remove punctuation
text = text.lower() # Convert text to lowercase
return text.split()
# ------------------------------------------------------------------------------------------------------------
# Define the Boolean model function
def boolean_model(query):
#corpus = pd.read_csv('preprocessed_data.csv')['content'].tolist()
corpus_raw = pd.read_csv('raw_data.csv')
# Pre-process the query
query = clean_text(query)
# Split query into terms
if not query:
return []
terms = query
# Find matching documents for each term
results = []
#univ_set = set([x for x in range(len(corpus_raw))])
for i, term in enumerate(terms):
if term in inverted_index:
            if i == 0 or terms[i-1] != 'not':
results.append(inverted_index[term])
else:
#results.append(univ_set.difference(set(inverted_index[term])))
pass
else:
results.append(set())
#print(results)
# Combine the sets using Boolean operators
combined_results = set()
for i, term_result in enumerate(results):
term_result = set(term_result) # convert list to set
if i == 0:
combined_results = term_result
else:
if terms[i-1] == 'and':
combined_results = combined_results.intersection(term_result)
elif terms[i-1] == 'or':
combined_results = combined_results.union(term_result)
# Get the documents matching all terms
# matching_docs = [corpus[i] for i in combined_results]
df = corpus_raw
return df[df.index.isin(combined_results)]
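# Illustrative call (hypothetical query; assumes both terms occur in the index):
# boolean_model("python and tweets") returns the raw rows whose ids appear in
# the postings lists of both "python" and "tweets".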
# ------------------------------------------------------------------------------------------------------------
# Define a function to handle wildcard queries
def handle_wildcard_query(query):
pattern = query.replace('*', '.*')
regex = re.compile(pattern)
matching_terms = [term for term in inverted_index.keys() if regex.match(term)]
doc_ids = set([doc_id for term in matching_terms for doc_id in inverted_index[term]])
return doc_ids
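# Illustrative behaviour: the query "py*" becomes the regex "py.*", so it would
# match index terms such as "python" or "pytest" (hypothetical terms).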
# ------------------------------------------------------------------------------------------------------------
# Define a function to handle phrase queries
def handle_phrase_query(query):
query = re.sub(r"http\S+", "", query) # Remove URLs
query = re.sub(r'[^\w\s]', '', query) # Remove punctuation
    query_terms = query.lower().split()
    if not query_terms:
        return []
    phrase_docs = []
    for i in range(len(df)):
        doc_tokens = df.iloc[i]['content'].split()
        for pos in range(len(doc_tokens)):
            if doc_tokens[pos] == query_terms[0]:
                match = True
                for j in range(1, len(query_terms)):
                    if pos + j >= len(doc_tokens) or doc_tokens[pos + j] != query_terms[j]:
                        match = False
                        break
                if match:
                    phrase_docs.append(i)
                    break
return phrase_docs
# ------------------------------------------------------------------------------------------------------------
# Define a function to calculate precision and recall
def calc_precision_recall(relevant_docs, retrieved_docs):
tp = len(set(relevant_docs) & set(retrieved_docs))
fp = len(retrieved_docs) - tp
fn = len(relevant_docs) - tp
precision = tp / (tp + fp) if tp + fp > 0 else 0
recall = tp / (tp + fn) if tp + fn > 0 else 0
return precision, recall
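# precision = TP / (TP + FP); recall = TP / (TP + FN), where TP counts the
# retrieved documents that are also relevant.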
# ------------------------------------------------------------------------------------------------------------
# Example usage
def query_app(wq, pq):
wildcard_query = wq
phrase_query = pq
wildcard_doc_ids = handle_wildcard_query(wildcard_query)
phrase_doc_ids = handle_phrase_query(phrase_query)
print(f'Wild card query: {wildcard_query}, matching doc ids: {wildcard_doc_ids}')
print(f'Phrase query: {phrase_query}, matching doc ids: {phrase_doc_ids}')
# ------------------------------------------------------------------------------------------------------------
def query_pr_app(wq, pq, relevant_docs):
wildcard_query = wq
phrase_query = pq
wildcard_doc_ids = handle_wildcard_query(wildcard_query)
phrase_doc_ids = handle_phrase_query(phrase_query)
print(f'Wild card query: {wildcard_query}, matching doc ids: {wildcard_doc_ids}')
print(f'Phrase query: {phrase_query}, matching doc ids: {phrase_doc_ids}')
print('---')
print('Evaluation:')
print(f'Number of relevant documents: {len(relevant_docs)}')
wildcard_precision, wildcard_recall = calc_precision_recall(relevant_docs, wildcard_doc_ids)
print(f'Wild card query precision: {wildcard_precision}, recall: {wildcard_recall}')
phrase_precision, phrase_recall = calc_precision_recall(relevant_docs, phrase_doc_ids)
print(f'Phrase query precision: {phrase_precision}, recall: {phrase_recall}')
# ------------------------------------------------------------------------------------------------------------
def retrieve_using_cosine_similarity(query, num_docs = 5):
# Tokenize and clean the query
query_tokens = clean_text(query)
corpus = df['content'].tolist()
corpus_raw = pd.read_csv('raw_data.csv')['content'].tolist()
# Retrieve documents containing at least one query term
candidate_doc_ids = set()
for query_token in query_tokens:
if query_token in inverted_index:
candidate_doc_ids.update(inverted_index[query_token])
# Calculate the cosine similarity between the query and candidate documents
    candidate_ids = list(candidate_doc_ids)
    candidate_docs = [corpus[doc_id] for doc_id in candidate_ids]
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform(candidate_docs)
query_vector = vectorizer.transform([query])
cosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()
# Sort the candidate documents by cosine similarity in descending order and get the top documents
document_indices = cosine_similarities.argsort()[::-1][:num_docs]
    return [candidate_ids[index] for index in document_indices]
# ------------------------------------------------------------------------------------------------------------
def log_likelihood(query, num_docs):
corpus = df['content'].tolist()
query = re.sub(r"http\S+", "", query) # Remove URLs
query = re.sub(r'[^\w\s]', '', query) # Remove punctuation
query_tokens = query.lower().split()
query_likelihood = {}
for token in query_tokens:
if token in query_likelihood:
query_likelihood[token] += 1
else:
query_likelihood[token] = 1
query_length = sum(query_likelihood.values())
for token in query_likelihood:
query_likelihood[token] = query_likelihood[token] / query_length
# Retrieve the documents that contain any of the query tokens
retrieved_docs = set()
for token in query_tokens:
if token in inverted_index:
retrieved_docs.update(inverted_index[token])
# Compute the likelihood of each retrieved document
doc_likelihoods = {}
for doc_id in retrieved_docs:
doc_tokens = corpus[doc_id].lower().split()
doc_length = len(doc_tokens)
likelihood = 0
for token in query_likelihood:
count = doc_tokens.count(token)
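            # Per-term maximum-likelihood estimate; the 1/(doc_length + 1)
            # fallback keeps log() finite for terms absent from the document.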
token_likelihood = count / doc_length if count > 0 else 1 / (doc_length + 1)
likelihood += math.log(token_likelihood) * query_likelihood[token]
doc_likelihoods[doc_id] = likelihood
# Rank the retrieved documents by their likelihood
sorted_docs = sorted(doc_likelihoods.items(), key=lambda x: x[1], reverse=True)
    # Return the ids of the top N documents (each doc_id already indexes the corpus)
    return [doc_id for doc_id, _ in sorted_docs[:num_docs]]
# ------------------------------------------------------------------------------------------------------------
# Define a function to retrieve documents using cosine similarity with relevance feedback
def retrieve_using_cosine_similarity_with_feedback(query, rel_list, num_docs = 5, alpha = 1, beta = 0.75, gamma = 0.15):
# Transform the query using the vectorizer
corpus = df['content'].tolist()
corpus_raw = pd.read_csv('raw_data.csv')['content'].tolist()
# Create a TF-IDF vectorizer and transform the corpus
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform(corpus)
query_vector = vectorizer.transform([query])
# Calculate the cosine similarity between the query and all documents in the corpus
cosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()
# Sort the documents by cosine similarity in descending order and get the top documents
document_indices = cosine_similarities.argsort()[::-1][:num_docs]
top_documents = [(corpus_raw[index], cosine_similarities[index]) for index in document_indices]
# Print the top documents
print(document_indices)
print(f"Showing top {num_docs} documents that are most similar to the query '{query}':\n")
for i, (text, cosine_sim) in enumerate(top_documents):
print(f"Rank {i+1} (Cosine Similarity: {cosine_sim:.4f}):")
print(text)
print("Reason: The document has a high cosine similarity score with the query.\n")
# Get feedback from the user on the relevance of the search results
relevant_doc_indices = []
non_relevant_doc_indices = []
print(rel_list, type(rel_list))
for i in range(len(top_documents)):
if(str(i) in rel_list):
relevant_doc_indices.append(document_indices[i])
else:
non_relevant_doc_indices.append(document_indices[i])
# Calculate the new query vector using the Rocchio algorithm
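    # Rocchio update: q_new = alpha*q + beta*centroid(relevant) - gamma*centroid(non-relevant)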
relevant_doc_vectors = tfidf_matrix[relevant_doc_indices]
non_relevant_doc_vectors = tfidf_matrix[non_relevant_doc_indices]
new_query_vector = alpha * query_vector + beta * relevant_doc_vectors.mean(axis=0) - gamma * non_relevant_doc_vectors.mean(axis=0)
# Calculate the cosine similarity between the new query vector and all documents in the corpus
cosine_similarities = cosine_similarity(np.asarray(new_query_vector), tfidf_matrix).flatten()
# Sort the documents by cosine similarity in descending order and get the top documents
document_indices = cosine_similarities.argsort()[::-1][:num_docs]
top_documents = [(corpus_raw[index], cosine_similarities[index]) for index in document_indices]
print(document_indices, top_documents)
print(type(document_indices), type(top_documents))
# Print the reranked top documents
print(f"\nShowing top {num_docs} reranked documents that are most similar to the query '{query}':\n")
for i, (text, cosine_sim) in enumerate(top_documents):
print(f"Rank {i+1} (Cosine Similarity: {cosine_sim:.4f}):")
print(text)
print("Reason: The document has a high cosine similarity score with the reranked query.\n")
return list(document_indices)
# ------------------------------------------------------------------------------------------------------------
# Test the Boolean model
option = st.selectbox(
'Type of query :',
('Boolean', 'Phrase', 'Wildcard', 'Cosine Similarity' , 'Relevance'))
N_cards_per_row = 3
max_results = 24
image = Image.open("icon.png")
resized_image = image.resize((300, 300))
st.sidebar.image(resized_image, width = 250)
for _ in range(5):
st.sidebar.text("\n")
st.sidebar.text("This app is to serve as a front-end \nfor the tweets dataset search \nengine system implemented for\nAIRIW Assignment 1 in Python.")
df1 = pd.read_csv('raw_data.csv')
st.info("Search tweets by Boolean, Phrase, Wildcard, Cosine , Likelihood or Relevant")
text_search = st.text_input("Enter your query :")
if st.button('Go'):
st.success("Searching... Your query is being processed !!!")
if(option == 'Boolean'):
df_search = boolean_model(text_search)
elif(option == 'Phrase'):
df_search = df1[df1.index.isin(handle_phrase_query(text_search))]
elif(option == 'Wildcard'):
df_search = df1[df1.index.isin(handle_wildcard_query(text_search))]
elif(option == 'Cosine Similarity'):
df_search = df1[df1.index.isin(retrieve_using_cosine_similarity(text_search, max_results))]
# elif(option == 'Log Likelihood'):
# df_search = df1[df1.index.isin(log_likelihood(text_search, max_results))]
elif(option == 'Relevance'):
rel_lis = st.text_input("Enter relevant docs as a list")
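        # Caveat: Streamlit widgets created inside a button branch are rebuilt on
        # every rerun, so this feedback box only lives for a single interaction.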
if rel_lis:
st.write('Feedback submitted! New results are: ')
df_search = df1[df1.index.isin(retrieve_using_cosine_similarity_with_feedback(text_search, rel_lis.split(','), max_results))]
else:
df_search = df1[df1.index.isin(retrieve_using_cosine_similarity(text_search, max_results))]
df_search = df_search[:max_results]
if text_search:
with st.expander("Click to see dataframe view"):
st.write(df_search)
for n_row, row in df_search.reset_index().iterrows():
i = n_row % N_cards_per_row
if i == 0:
st.write("---")
cols = st.columns(N_cards_per_row, gap = "large")
# draw the card
with cols[n_row % N_cards_per_row]:
st.caption(f"(Result No.: {n_row}) Tweet:")
st.markdown(f"**{row['content'].strip()}**")
st.markdown(f"*{row['publish_date'].strip()}*")
|
smsraj2001/MINI-SEARCH-ENGINE
|
app.py
|
app.py
|
py
| 14,858 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.set_page_config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "streamlit.selectbox",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "streamlit.sidebar.image",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.text",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.text",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "streamlit.info",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "streamlit.success",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "streamlit.expander",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "streamlit.caption",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 326,
"usage_type": "call"
}
] |
20968158774
|
import math
from flask import Flask, render_template, request, jsonify
import pickle
import pandas as pd
import numpy as np
import mariadb
import jinja2
conn = mariadb.connect(
user="root",
password="root",
host="localhost",
database="pal_taqdeer")
cur = conn.cursor()
app = Flask(__name__)
@app.route('/')
def index():
query = "select * from student_forecast"
cur.execute(query)
data = list(cur)
return render_template("index.html", data=data)
with open("model.pkl", "rb") as f:
model = pickle.load(f)
@app.route('/result', methods=['POST'])
def result():
age = request.form.get('age')
medu = request.form.get('medu')
fedu = request.form.get('fedu')
failures = request.form.get('failures')
higher = request.form.get('higher')
romantic = request.form.get('romantic')
g1 = request.form.get('g1')
g2 = request.form.get('g2')
gouout = request.form.get('gouout')
if not age or int(age) < 15 or int(age) > 22:
return jsonify({'error' : 'Invalid Age'})
if medu == "":
return jsonify({'error' : 'Invalid Mother Education Status'})
if fedu == "":
return jsonify({'error' : 'Invalid Father Education Status'})
if failures == "" or int(failures) > 4:
return jsonify({'error' : 'Invalid Failures No.'})
if not higher:
return jsonify({'error' : 'Invalid Higher Education Status'})
if not romantic:
return jsonify({'error' : 'Invalid Romantic Status'})
if g1 == "" or int(g1) > 20:
return jsonify({'error' : 'Invalid First Period Grade'})
if g2 == "" or int(g2) > 20:
return jsonify({'error' : 'Invalid Second Period Grade'})
    if gouout == "" or not 1 <= int(gouout) <= 5:
return jsonify({'error' : 'Invalid Hang Out Status'})
inputs = [age, medu, fedu, failures,
higher, g1, g2, romantic, gouout]
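    # Single-row frame; the column order must match the features the pickled
    # model was trained on.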
df = pd.DataFrame([np.array(inputs)], columns=[
'age', 'Medu', 'Fedu', 'failures', 'higher_yes', 'G1', 'G2', 'romantic_no', 'goout'])
finalGrade = model.predict(df)
final_grade_rounded = round(float(finalGrade))
query = f""" INSERT INTO student_forecast (age, medu, fedu, failures, higher, romantic, g1, g2, gouout)
VALUES ('{age}','{medu}','{fedu}','{failures}','{higher}','{romantic}','{g1}','{g2}','{gouout}'); """
cur.execute(query)
conn.commit()
return jsonify({'grade': final_grade_rounded})
if __name__ == "__main__":
app.run(debug=True)
|
sondosaabed/PalTaqdeer
|
app.py
|
app.py
|
py
| 2,489 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "mariadb.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 77,
"usage_type": "call"
}
] |
26013197336
|
from homeassistant.config_entries import ConfigEntry
from .const import (
PLATFORM,
PRESET_MODE_HOLIDAY,
PRESET_MODE_MANUAL,
PRESET_MODE_SCHEDULE_1,
PRESET_MODE_SCHEDULE_2,
PRESET_MODE_SCHEDULE_3,
PRESET_MODE_TEMP_OVERRIDE,
PRESET_MODE_ANTIFROST,
BAXI_PRESET_MANUAL,
BAXI_PRESET_SCHEDULE,
)
from homeassistant.components.climate.const import (
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
)
import datetime
from datetime import timedelta
def preset_mode_baxi_to_ha(baxi_mode, program=None):
if baxi_mode == "manual":
return PRESET_MODE_MANUAL
elif baxi_mode == "temporary-override":
return PRESET_MODE_TEMP_OVERRIDE
elif baxi_mode == "anti-frost":
return PRESET_MODE_ANTIFROST
elif baxi_mode == "schedule" and program == 1:
return PRESET_MODE_SCHEDULE_1
elif baxi_mode == "schedule" and program == 2:
return PRESET_MODE_SCHEDULE_2
elif baxi_mode == "schedule" and program == 3:
return PRESET_MODE_SCHEDULE_3
elif baxi_mode == "holiday":
return PRESET_MODE_HOLIDAY
def preset_mode_ha_to_baxi(ha_mode):
if ha_mode == PRESET_MODE_MANUAL:
return BAXI_PRESET_MANUAL, "manual"
elif ha_mode == PRESET_MODE_SCHEDULE_1:
return BAXI_PRESET_SCHEDULE, "1"
elif ha_mode == PRESET_MODE_SCHEDULE_2:
return BAXI_PRESET_SCHEDULE, "2"
elif ha_mode == PRESET_MODE_SCHEDULE_3:
return BAXI_PRESET_SCHEDULE, "3"
def hvac_mode_baxi_to_ha(raw_mode):
if raw_mode == "off":
return HVAC_MODE_OFF
elif raw_mode == "heating-auto":
return HVAC_MODE_AUTO
def hvac_mode_ha_to_baxi(ha_mode):
if ha_mode == HVAC_MODE_AUTO:
return "heating-auto"
elif ha_mode == HVAC_MODE_OFF:
return "off"
def create_override_date(target_time, days_offset):
now = datetime.datetime.now()
override_date = now + timedelta(days=days_offset)
target_hour = int(target_time.split(":")[0])
target_minutes = int(target_time.split(":")[1])
override_date = override_date.replace(
hour=target_hour, minute=target_minutes, second=0, microsecond=0
)
return override_date.isoformat("T", "minutes")
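# Illustrative call (hypothetical values): create_override_date("21:30", 1)
# yields tomorrow's date at 21:30 as an ISO-8601 string such as "2023-06-08T21:30".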
|
vipial1/BAXI_thermostat
|
custom_components/baxi_thermostat/helper.py
|
helper.py
|
py
| 2,194 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "const.PRESET_MODE_MANUAL",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_TEMP_OVERRIDE",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_ANTIFROST",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_SCHEDULE_1",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_SCHEDULE_2",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_SCHEDULE_3",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_HOLIDAY",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_MANUAL",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "const.BAXI_PRESET_MANUAL",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_SCHEDULE_1",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "const.BAXI_PRESET_SCHEDULE",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_SCHEDULE_2",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "const.BAXI_PRESET_SCHEDULE",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "const.PRESET_MODE_SCHEDULE_3",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "const.BAXI_PRESET_SCHEDULE",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.climate.const.HVAC_MODE_OFF",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.climate.const.HVAC_MODE_AUTO",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.climate.const.HVAC_MODE_AUTO",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.climate.const.HVAC_MODE_OFF",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 67,
"usage_type": "call"
}
] |
23713666857
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 22:16:45 2023
@author: EMILIANO
"""
import openpyxl
import pandas as pd
## "Workbook" is capitalized (first letter uppercase)
from openpyxl import Workbook
Excelworkbook = openpyxl.load_workbook(r"H:\Documentos\Practica Pyhton Bond Arg\Dataset bonos arg usd.xlsx")
Excelsheet=Excelworkbook.active
# get the column titles
titulos= next(Excelsheet.values)[0:]
bondarg=pd.DataFrame(Excelsheet.values,columns=titulos)
bondarg=bondarg.drop(bondarg.index[0])
# create the price-difference columns between AE38-AL30 and AL41-AL30
bondarg["Diferencia Precio 38-30"]=bondarg[" AE38D "]-bondarg[" AL30D "]
bondarg["Diferencia Precio 41-30"]=bondarg[" AL41D "]-bondarg[" AL30D "]
media_38_30=bondarg["Diferencia Precio 38-30"].mean()
media_41_30=bondarg["Diferencia Precio 41-30"].mean()
# ## "Workbook" is capitalized (first letter uppercase)
# nuevoexcel=Workbook()
# nuevoexcel_sheet=nuevoexcel.active
# nuevoexcel=bondarg
# nuevoexcel.save("datasetarg.xlsx")
# how to navigate columns with iloc using coordinates
bono38 = bondarg.iloc[0:, 5]
# copiar un "Dataframe" y pegarlo en otro lado Copia 2
# copia2
# bondarg2=bondarg
# # altero el original solo con la columna
# bondarg=bondarg[" AE38D "]
bondarg_head10=bondarg[["Date"," AE38D "]].head(10)
bondarg_tail10=bondarg[["Date"," AE38D "]].tail(10)
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
bondarg_combinado = pd.concat([bondarg_head10, bondarg_tail10])
bondarg_combinado2 = pd.concat([bondarg_head10, bondarg_tail10, bondarg_head10])
|
emilapuente1/Practica-Pyhton-Bond-Arg
|
Bondarg.py
|
Bondarg.py
|
py
| 1,510 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "openpyxl.load_workbook",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "call"
}
] |
17646581677
|
import boto3
from flask import Flask, request
import json
import os
app = Flask(__name__)
REGION = 'eu-north-1'
TESTING_URL = 'http://localhost:4566' # os.environ['LOCAL_TESTING']
TOPIC_ARN = 'arn:aws:sns:eu-north-1:000000000000:techtalk-sns'
@app.route('/')
def demo_homepage():
return "Welcome to Anusha`s LocalStack Demo."
@app.route('/send', methods=['POST'])
def send_messages():
session = boto3.session.Session(region_name=REGION)
message = (request.data).decode('ascii')
sns = session.client('sns', endpoint_url=TESTING_URL)
response = sns.publish(
TopicArn=TOPIC_ARN,
Subject='Hello',
Message=message
)
return {'response': 'message sent successfully'}
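# Illustrative call, assuming the Flask app listens on the default port 5000:
#   curl -X POST http://localhost:5000/send -d 'hello world'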
@app.route('/receive')
def receive_messages():
session = boto3.session.Session(region_name=REGION)
    result = 'No Message Received'  # Default
sqs = session.client('sqs', endpoint_url=TESTING_URL)
response = sqs.receive_message(
QueueUrl='http://localhost:4566/000000000000/techtalk'
)
msgs = response.get('Messages')
if msgs:
result = [(json.loads(msg.get('Body'))).get('Message') for msg in msgs if msg.get('Body')]
handles = [{'Id': msg.get('MessageId'), 'ReceiptHandle': msg.get('ReceiptHandle')} for msg in msgs]
sqs.delete_message_batch(
QueueUrl='http://localhost:4566/000000000000/techtalk',
Entries=handles
)
return {'Result': result}
|
anushacassum/mylocalstack
|
app.py
|
app.py
|
py
| 1,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "boto3.session.Session",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request.data.decode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "boto3.session.Session",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 42,
"usage_type": "call"
}
] |
36060772305
|
#!/usr/bin/env python3
import json
import socket
import sys
from utils.save_json import save_json
def initSocket(ip, port, diretorio):
dir = 'src/json/'+diretorio+'.json'
dicionario = ''
try:
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind((ip, port))
tcp.listen(2)
while True:
con, cliente = tcp.accept()
            print('Connected by', cliente)
while True:
msg = con.recv(12288)
msg = msg.decode()
dicionario += msg
# dicionario.append(msg)
# json_distribuido = json.loads(dicionario)
if not msg:
save_json(dir, json.loads(dicionario))
dicionario = ''
break
except KeyboardInterrupt:
sys.exit(0)
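# Minimal client sketch (illustrative only; assumes the same ip/port pair):
#   import json, socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect((ip, port))
#   s.sendall(json.dumps({"sample": 1}).encode())
#   s.close()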
|
AntonioAldisio/FSE-2022-2-Trabalho-1
|
src/servidor/servidor.py
|
servidor.py
|
py
| 847 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "socket.socket",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "utils.save_json.save_json",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
}
] |
25785453821
|
'''
Scatter the DMUs by their dpei and dyct scores and split them into the four
quadrants of a 3.33/3.33 classification matrix.
'''
import xlrd
import matplotlib.pyplot as plt
from config import *
from model import Dmu
# plt.rcParams['text.usetex']=True
# # Place the command in the text.latex.preamble using rcParams
# plt.rcParams['text.latex.preamble']=r'\makeatletter \newcommand*{\rom}[1]{\bfseries\expandafter\@slowromancap\romannumeral #1@} \makeatother'
plt.xkcd()
def split(func):
def _wrapper(*args, **kw):
dmus = func(*args, **kw)
return ([item for item in dmus if item.dpei <= 3.33 and item.dyct <= 3.33],
[item for item in dmus if item.dpei > 3.33 and item.dyct <= 3.33],
[item for item in dmus if item.dpei > 3.33 and item.dyct > 3.33],
[item for item in dmus if item.dpei <= 3.33 and item.dyct > 3.33])
return _wrapper
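# The four returned lists are the quadrants of the 3.33/3.33 split, in order:
# left-bottom, right-bottom, right-top, left-top.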
@split
def read_data():
workbook = xlrd.open_workbook(XLS_FILE_NAME)
sheet = workbook.sheets()[SHEET_NO]
dmus = []
for row_index in range(ROW_FROM, ROW_TO+1):
row = sheet.row_values(row_index)[COLUMN_FROM:COLUMN_TO+1]
dmus.append(Dmu(row[0], float(row[1]), float(row[2])))
return dmus
def _write_dmus(dmus, color=None, marker=None):
name = [item.name for item in dmus]
dpeis = [item.dpei for item in dmus]
dpcts = [item.dyct for item in dmus]
plt.scatter(dpeis,dpcts, c=color, marker=marker,zorder=2)
def _generate_dict(dmus):
dmu_dict = {}
for dmu in dmus:
dmu_dict[dmu.name]=[dmu.dpei, dmu.dyct]
return dmu_dict
def draw():
dmus_left_bottom, dmus_right_bottom, dmus_right_top, dmus_left_top = read_data()
# draw point
_write_dmus(dmus_left_bottom, '#76EE00','^')
_write_dmus(dmus_right_bottom, '#EE4000','2')
_write_dmus(dmus_right_top, '#4F94CD','o')
_write_dmus(dmus_left_top, '#DAA520','s')
#decoration
plt.xlim(-0.5, 10.5)
plt.ylim(-0.5, 10.5)
plt.plot([-0.5, 10], [3.33,3.33], '--', c='k', linewidth=0.8)
plt.plot([3.33,3.33],[0, 10.5], '--', c='k', linewidth=0.8)
plt.text(3.33, -0.25, s='3.33', ha='center', va='center', fontsize=9)
plt.text(10.3, 3.33, s='3.33', ha='center', va='center', rotation=90,fontsize=9)
# annotations
for dmu in dmus_right_top:
plt.text(dmu.dpei+0.3, dmu.dyct+0.32, s=dmu.name, ha='center', va='center',fontsize=9)
for dmu in dmus_left_top:
plt.text(dmu.dpei-0.3, dmu.dyct+0.32, s=dmu.name, ha='center', va='center', fontsize=9)
#draw right_bottom
dmu_dict = _generate_dict(dmus_right_bottom)
plt.text(dmu_dict['Hunan'][0]+0.3,dmu_dict['Hunan'][1]-0.32, s='Hunan',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Guizhou'][0]+0.3,dmu_dict['Guizhou'][1]-0.32, s='Guizhou',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Jilin'][0]+0.3,dmu_dict['Jilin'][1]-0.3, s='Jilin',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Anhui'][0]+0.5,dmu_dict['Anhui'][1]-0.05, s='Anhui',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Guangxi'][0]+0.2,dmu_dict['Guangxi'][1]-0.32, s='Guangxi',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Zhejiang'][0]+0.2,dmu_dict['Zhejiang'][1]-0.05, s='Zhejiang',
ha='left', va='center', fontsize=9)
# draw left bottom
dmu_dict.clear()
dmu_dict = _generate_dict(dmus_left_bottom)
plt.text(dmu_dict['Hainan'][0]+0.4,dmu_dict['Hainan'][1]-0.35, s='Hainan',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Beijing'][0]+0.4,dmu_dict['Beijing'][1]-0.35, s='Beijing',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Qinghai'][0]-0.1, dmu_dict['Qinghai'][1]-0.2, s='Qinghai',
ha='right', va='center', fontsize=9)
plt.text(dmu_dict['Shanghai'][0]-0.1, dmu_dict['Shanghai'][1]-0.5, s='Shanghai',
ha='center', va='center', fontsize=9)
plt.plot([dmu_dict['Shanghai'][0],dmu_dict['Shanghai'][0]-0.1],
[dmu_dict['Shanghai'][1], dmu_dict['Shanghai'][1]-0.4],
'-',
c='k',linewidth=0.5,zorder=1)
plt.text(dmu_dict['Xinjiang'][0]-0.1, dmu_dict['Xinjiang'][1]+0.4, s='Xinjiang',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Ningxia'][0]-0.1, dmu_dict['Ningxia'][1]+0.35, s='Ningxia',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Tianjin'][0]+.4, dmu_dict['Tianjin'][1]+0.25, s='Tianjin',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Gansu'][0]+0.45, dmu_dict['Gansu'][1]+0.1, s='Gansu',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Shannxi'][0]+0.1, dmu_dict['Shannxi'][1]+0.4, s='Shannxi',
ha='center', va='center', fontsize=9)
plt.text(dmu_dict['Fujian'][0]+0.3, dmu_dict['Fujian'][1]+0.5, s='Fujian',
ha='left', va='center', fontsize=9)
plt.plot([dmu_dict['Fujian'][0], dmu_dict['Fujian'][0]+0.5],
[dmu_dict['Fujian'][1], dmu_dict['Fujian'][1]+0.4],
'-',
c='k',linewidth=0.5,zorder=1)
plt.text(dmu_dict['Yunnan'][0], dmu_dict['Yunnan'][1]+0.15, s='Yunnan',
ha='left', va='center', fontsize=9)
plt.text(dmu_dict['Chongqing'][0]-0.2, dmu_dict['Chongqing'][1]-0.4, s='Chongqing',
ha='left', va='center', fontsize=9)
plt.text(dmu_dict['Heilongjiang'][0]+0.1, dmu_dict['Heilongjiang'][1]-0.7, s='Heilongjiang',
ha='left', va='center', fontsize=9)
plt.text(dmu_dict['Jiangxi'][0]-0.1, dmu_dict['Jiangxi'][1]-1, s='Jiangxi',
ha='left', va='center', fontsize=9)
plt.plot([dmu_dict['Heilongjiang'][0], dmu_dict['Heilongjiang'][0]+0.4],
[dmu_dict['Heilongjiang'][1], dmu_dict['Heilongjiang'][1]-0.55],
'-',c='k',linewidth=0.5, zorder=1)
plt.plot([dmu_dict['Jiangxi'][0], dmu_dict['Jiangxi'][0]+0.2],
[dmu_dict['Jiangxi'][1], dmu_dict['Jiangxi'][1]-0.9],
'-',c='k',linewidth=0.5, zorder=1)
plt.show()
if __name__ == '__main__':
draw()
|
gaufung/CodeBase
|
PDA/matrix/app.py
|
app.py
|
py
| 6,090 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.xkcd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "model.Dmu",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
}
] |
38610058452
|
import subprocess
import re
import skia
import io
from platform import system
from pathlib import Path
from PIL import Image
from xml.etree import ElementTree as ET
from typing import Any, Tuple, Union, List
class SVG:
"""SVG class to load, edit, render, and export svg files using pillow and inkscape."""
if system() == "Darwin":
__SYSTEM_DPI = 72
else:
__SYSTEM_DPI = 96
__WIDTH_HEIGHT_REGEX = "^[1-9]\d*(px|)$|^[1-9]\d*\.?\d*(v(h|w)|\%)$"
__ICO_SIZES = [16, 32, 48, 64, 256]
__RESAMPLING_FILTERS = {
"nearest": 0,
"lanczos": 1,
"bilinear": 2,
"bicubic": 3,
"box": 4,
"hamming": 5,
"rerender": 6
}
def __init__(self, fp: Union[str, Path, bytes]):
"""Create an SVG instance from a svg file.
Args:
fp (str | Path | bytes): The svg filepath.
"""
# Check if filepath is valid
self.fp: Path = Path(fp).resolve()
if not self.fp.exists():
raise FileNotFoundError(f"SVG file '{fp}' does not exist.")
# Load SVG xml file
self.root: ET.Element = ET.parse(fp).getroot()
def __str_to_nu(self, input: str) -> Tuple[float, str]:
"""Extracts size number and units from string.
Args:
input (str): String in the form of '{number}{units}'. Accepts strings containing integers and floats.
Returns:
Tuple[float, str]: The number and unit values.
"""
unit = re.findall("(|mm|px|in|vw|\%)$", input)[0]
num = float(re.findall("^([1-9]\d*\.?\d*|[1-9]\d*)", input)[0])
return num, unit
def __to_px(self, num: float, unit: str, view: Tuple[float, float] = None) -> int:
"""Converts a number of unit types mm, cm, in, vw, vh, and % to pixels.
Args:
number (float): The number to convert to pixels
unit (str): The unit of the number for conversion. Currently supports mm, cm, in, vw, vh, and %.
view (Tuple[float, float]): The svg view box dimensions for units of vw, vh, and %.
Raises:
ValueError: View box is not provided for units of vw, vh, and %.
Returns:
int: The number converted to pixel units.
"""
if unit == 'mm':
num *= (__class__.__SYSTEM_DPI / 25.4)
if unit == 'cm':
num *= (__class__.__SYSTEM_DPI / 2.54)
elif unit == 'in':
num *= __class__.__SYSTEM_DPI
if unit in ["%", "vw", "vh"]:
if not view:
raise ValueError(f"View box is necessary for conversions involving {unit}'s")
sw, sh = view
if unit == "vh":
num *= sh / 100.
else:
num *= sw / 100.
return num
@property
def size(self) -> Tuple[int, int]:
"""The size of the svg file in pixels. Defaults to (300, 150).
"""
viewBox = tuple(float(i) for i in self.root.attrib['viewBox'].split(
)) if 'viewBox' in self.root.attrib else (0, 0, 300, 150)
width = self.root.attrib['width'] if 'width' in self.root.attrib else "100vw"
height = self.root.attrib['height'] if 'height' in self.root.attrib else "100vh"
sw, sh = float(viewBox[2] - viewBox[0]), float(viewBox[3] - viewBox[1])
_, uw = self.__str_to_nu(width)
_, uh = self.__str_to_nu(height)
if uw in ["mm", "in", "cm"]:
sw = self.__to_px(sw, uw)
if uh in ["mm", "in", "cm"]:
sh = self.__to_px(sh, uh)
return int(sw), int(sh)
@property
def viewBox(self) -> Tuple[int, int, int, int]:
"""The viewBox of the svg file. Defaults to '0 0 300 150'."""
if 'viewBox' not in self.root.attrib:
self.root.attrib['viewBox'] = "0 0 300 150"
return tuple(int(i) for i in self.root.attrib['viewBox'].split())
@viewBox.setter
def viewBox(self, value: Tuple[int, int, int, int]):
"""Setter for viewBox."""
print(" ".join(str(v) for v in value))
self.root.attrib['viewBox'] = " ".join(str(v) for v in value)
@property
def width(self) -> str:
"""The width of the svg. Defaults to 100vw."""
return self.__get_attrib('width', '100vw')
@width.setter
    def width(self, value: str):
"""Setter for width."""
self.__set_attrib('width', value, __class__.__WIDTH_HEIGHT_REGEX)
@property
def height(self) -> str:
"""The width of the svg. Defaults to 100vh."""
return self.__get_attrib('height', '100vh')
@height.setter
    def height(self, value: str):
"""Setter for height."""
self.__set_attrib('height', value, __class__.__WIDTH_HEIGHT_REGEX)
def __set_attrib(self, attrib: str, value: str, regex: str = None):
"""Helper function for setting string attributes in the XML tree.
Args:
attrib (str): the target attribute.
value (str): the value to set.
regex (str | None, optional): A regex str for value checking. Defaults to None.
Raises:
ValueError: Value does not satisfy the regex condition.
"""
if regex and not re.findall(regex, value):
raise ValueError(
f"Invalid value for svg attribute {attrib}:", value)
self.root.attrib[attrib] = value
def __get_attrib(self, attrib: str, default: Any = None) -> Any:
"""Helper function for getting an svg attribute from the XML tree.
Args:
attrib (str): the attribute to return.
default (Any, optional): The default value of the attribute if it does not exist.
Returns:
Any: The attribute value. Will return None if attribute does not exist and no default value was specified.
"""
        if attrib not in self.root.attrib:
            if default:
                self.root.attrib[attrib] = default
            else:
                return None
return self.root.attrib[attrib]
def __calc_sizes(self, dpi: List[int] = None, sizes: List[Union[int, Tuple[int, int]]] = None) -> List[Tuple[int, int]]:
"""Helper function to calculate the sizes of all images being rendered. Converts DPI values in pixel dimension.
Args:
dpi (List[int], optional): DPI of the images being rendered. Defaults to None.
sizes (List[Union[int, Tuple[int, int]]] | None, optional): Sizes of the images being rendered. Defaults to None.
Returns:
List[Tuple[int, int]]: A list of sizes (int pairs) of the images being rendered.
"""
sw, sh = self.size
if not dpi and not sizes:
values = [(sw, sh)]
else:
values = []
if dpi:
values.extend(
[(round(sw * (i / __class__.__SYSTEM_DPI)), round(sh * (i / __class__.__SYSTEM_DPI))) for i in dpi])
if sizes:
values.extend([i if isinstance(i, tuple) else (
i, round(i * sh/sw)) for i in sizes])
return values
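    # Illustrative results (assuming a 96-DPI system and a 300x150 page size):
    #   __calc_sizes(dpi=[192])   -> [(600, 300)]  (192/96 scales both dimensions by 2)
    #   __calc_sizes(sizes=[600]) -> [(600, 300)]  (a bare width keeps the aspect ratio)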
def __max_size(self, sizes: List[Tuple[int, int]]) -> Tuple[int, int]:
"""Helper function to determine the largest image size to render such that all other sizes are a down scaling of it.
Args:
sizes (List[Tuple[int, int]]): The sizes of the images being rendered.
Returns:
Tuple[int, int]: A size (int pair) representing the largest necessary image to render.
"""
sw, sh = self.size
max_width, max_height = (max(i) for i in zip(*sizes))
multi = max(max_width / sw, max_height / sh)
return round(sw * multi), round(sh * multi)
def __im_skia(self, size: Tuple[int, int]) -> Image.Image:
"""Helper function to render a single PIL.Image object using skia-python.
Args:
size (Union[int, Tuple[int, int]], optional): Size of the rendered image.
Returns:
Image.Image: An instance of PIL.Image.Image.
"""
path = Path(self.fp).resolve()
skia_stream = skia.Stream.MakeFromFile(str(path))
skia_svg = skia.SVGDOM.MakeFromStream(skia_stream)
w, h = skia_svg.containerSize()
sw, sh = size
surface = skia.Surface(round(sw), round(sh))
with surface as canvas:
canvas.scale(round(sw) / w, round(sh) / h)
skia_svg.render(canvas)
with io.BytesIO(surface.makeImageSnapshot().encodeToData()) as f:
img = Image.open(f)
img.load()
return img
def __im_inkscape(self, size: Tuple[int, int], margin: int = None, area: str = 'page') -> Image.Image:
"""Helper function to render a single PIL.Image object using inkscape.
Args:
size (Union[int, Tuple[int, int]], optional): Size of the rendered image.
margin (int, optional): Margins on the rendered image. Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
Returns:
Image.Image: An instance of PIL.Image.Image.
"""
path = Path(self.fp).resolve()
options = ["inkscape", "--export-filename=-", "--export-type=png"]
if area.lower() == 'page':
options.extend(["--export-area-page"])
elif area.lower() == 'drawing':
options.extend(["--export-area-drawing"])
else:
options.extend([f"--export-area={area}"])
sw, sh = size
if size:
options.extend([f"--export-width={sw}", f"--export-height={sh}"])
if margin:
options.extend([f"--export-margin={margin}"])
if not path.exists():
return None
else:
options.extend([f"{path}"])
try:
pipe = subprocess.Popen(options, stdout=subprocess.PIPE)
except FileNotFoundError:
raise FileNotFoundError("Please make sure inkscape is installed and has been added to the PATH")
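        # the two readline() calls below skip leading lines that inkscape emits on
        # stdout before the raw PNG bytes (an assumption inherited from this code's
        # behavior, not verified against a particular inkscape version)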
pipe.stdout.readline()
pipe.stdout.readline()
return Image.open(pipe.stdout)
def __im(self, size: Tuple[int, int], margin: int = None, area: str = 'page', renderer: str = 'skia') -> Image.Image:
"""Helper function to choose proper renderer. Throws an error if the renderer is not supported.
"""
if renderer == 'skia':
return self.__im_skia(size)
elif renderer == 'inkscape':
return self.__im_inkscape(size, margin, area)
else:
raise ValueError(
"Invalid renderer. Only supported renderers are 'skia' and 'inkscape'")
def __im_multi(self, sizes: List[Tuple[int, int]], margin: int = None, area: str = 'page', filter: str = "lanczos", renderer: str = "skia") -> List[Image.Image]:
"""Helper function to generate images of multiple specified sizes.
Args:
sizes (List[Union[int, Tuple[int, int]]], optional): Sizes of the images to render.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
Raises:
ValueError: Filter is invalid.
Returns:
List[Image.Image]: A list of PIL.Image.Image instances of different sizes.
"""
if filter not in __class__.__RESAMPLING_FILTERS:
raise ValueError(
f"Invalid filter: {filter}\nValid filters are: {' '.join(__class__.__RESAMPLING_FILTERS.keys())}")
if filter == "rerender":
return list(self.__im(size=size, margin=margin, area=area, renderer=renderer) for size in sizes)
else:
img = self.__im(size=self.__max_size(sizes), margin=margin, area=area, renderer=renderer)
return list(img.resize(size, __class__.__RESAMPLING_FILTERS[filter.lower()]) for size in sizes)
def __export(img: Image.Image, stem: str = None, format: Union[str, List[str]] = "png"):
"""Helper function to export a PIL.Image.Image instance to another image format.
Args:
img (Image.Image): The image to export.
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
Raises:
FileNotFoundError: The target directory for exporting does not exist.
"""
if isinstance(format, str):
format = [format]
parent = Path(stem).resolve().parent
if not parent.is_dir():
raise FileNotFoundError(
f"Could not locate the directory: {parent}\nPlease make sure the directory exists")
for f in format:
if f == "ico":
img.save(f"{stem}.{f}", sizes=[
(i, i) for i in __class__.__ICO_SIZES if i < img.width and i < img.height])
continue
try:
img.save(f"{stem}.{f}")
except OSError:
img.convert("RGB").save(f"{stem}.{f}")
def __export_multi(img: List[Image.Image], stem: str = None, format: Union[str, List[str]] = "png"):
"""Helper function to export multiple images in different formats.
Args:
img (List[Image.Image]): A list of images to export.
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
"""
for i in img:
__class__.__export(i, f"{stem}_{i.size[0]}_{i.size[1]}", format)
def im(self, dpi: Union[int, List[int]] = None, size: List[Union[int, Tuple[int, int]]] = None, margin: int = None, area: str = 'page', filter: str = "lanczos", renderer: str = "skia") -> Union[Image.Image, List[Image.Image]]:
"""
Render the SVG as PIL.Image instance. The default rendering size is one the one provided by the SVG file.
Args:
dpi (int | List[int], optional): The DPI(s) to render the image(s) at.
size (List[Union[int, Tuple[int, int]]], optional): The size(s) to render the image(s) at.
Can be a single integer (defining the width) or a pair for width and height. Defaults to None.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
Returns:
            Union[Image.Image, List[Image.Image]]: A single rendered image, or a list of images when multiple sizes/DPIs are requested.
"""
if isinstance(dpi, int):
dpi = [dpi]
if isinstance(size, int):
size = [size]
size = self.__calc_sizes(dpi, size)
if len(size) > 1:
return self.__im_multi(size, margin, area, filter, renderer=renderer)
else:
return self.__im(size[0], margin, area, renderer=renderer)
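    # Example usage (illustrative; assumes the class is named SVG, as SVG.py suggests,
    # and is constructed from a file path the way the IM/EXPORT classmethods below
    # call `cls(fp)`; "drawing.svg" is a hypothetical file):
    #   svg = SVG("drawing.svg")
    #   imgs = svg.im(dpi=[96, 192])   # two sizes requested -> list of PIL images
    #   img = svg.im(size=256)         # one size -> a single PIL image, 256 px wide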
def save(self, fp: Union[str, Path, bytes] = None):
"""Saves the SVG XML tree.
Args:
fp (str | Path | bytes, optional): The save path. If no path is specified, this will overwrite the original SVG. Defaults to None.
"""
if fp is None:
fp = self.fp
else:
fp = Path(fp).resolve()
ET.ElementTree(self.root).write(fp)
def export(self, stem: str = None, format: Union[str, List[str]] = "png", dpi: Union[int, List[int]] = None, size: List[Union[int, Tuple[int, int]]] = None, margin: int = None, area: str = 'page', filter: str = "lanczos", renderer: str = "skia"):
"""Renders and exports image(s) of specified size(s) as specified format(s).
Args:
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
dpi (int | List[int], optional): The DPI(s) to render the image(s) at. Defaults to 96.
size (List[Union[int, Tuple[int, int]]], optional): The size(s) to render the image(s) at.
Can be a single integer (defining the width) or a pair for width and height. Defaults to None.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
"""
if not stem:
stem = self.fp.stem
img = self.im(dpi, size, margin, area, filter, renderer=renderer)
if isinstance(img, list) and len(img) > 1:
__class__.__export_multi(img, stem, format)
elif isinstance(img, list):
__class__.__export(img[0], stem, format)
else:
__class__.__export(img, stem, format)
@classmethod
def IM(cls, fp: Union[str, Path, bytes], dpi: Union[int, List[int]] = None, size: Union[int, Tuple[int, int]] = None, margin: int = None, area: str = 'page', renderer: str = 'skia'):
"""Classmethod that returns a PIL.Image instance of a specified SVG. Useful if you do not need to create a class object.
Args:
fp (str | Path | bytes): The path of the svg file.
dpi (int, optional): DPI of the rendered image. Defaults to 96.
size (Union[int, Tuple[int, int]], optional): Size of the rendered image. Defaults to None.
margin (int, optional): Margins on the rendered image. Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
Returns:
            Union[Image.Image, List[Image.Image]]: The rendered image(s).
"""
return cls(fp).im(dpi, size, margin, area, renderer)
@classmethod
def EXPORT(cls, fp: Union[str, Path, bytes], stem: str = None, format: Union[str, List[str]] = "png", dpi: Union[int, List[int]] = None, size: Union[int, Tuple[int, int]] = None, margin: int = None, area: str = 'page', filter="lanczos", renderer: str = "skia"):
"""Classmethod that renders an SVG and exports image(s) of specified size(s) as specified format(s). Useful if you do not need to create an SVG class object.
Args:
fp (str | Path | bytes): The path of the svg file.
stem (str, optional): The name/path of the image (without the extension). Defaults to None.
format (str | List[str], optional): The formats to export. Defaults to "png".
Valid formats are defined here: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html\n
dpi (int | List[int], optional): The DPI(s) to render the image(s) at. Defaults to 96.
size (List[Union[int, Tuple[int, int]]], optional): The size(s) to render the image(s) at.
Can be a single integer (defining the width) or a pair for width and height. Defaults to None.
margin (int, optional): Margin of the images (shared across all). Defaults to None.
area (str, optional): The area to render. Valid values are 'page', 'drawing', and a string of form 'x y w h'. Defaults to 'page'.
            filter (str, optional): Which filter to use for downscaling. Use 'rerender' to render each image individually at the desired size. Defaults to 'lanczos'.
"""
cls(fp).export(stem, format, dpi, size, margin, area, filter, renderer)
|
jlwoolf/pillow-svg
|
PILSVG/SVG.py
|
SVG.py
|
py
| 20,691 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "platform.system",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "skia.Stream.MakeFromFile",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "skia.Stream",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "skia.SVGDOM.MakeFromStream",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "skia.SVGDOM",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "skia.Surface",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.ElementTree",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 459,
"usage_type": "name"
}
] |
73931882429
|
#!python
"""
A natural number, N, that can be written as the sum and product of a given set of at least two natural numbers, {a1, a2, ... , ak} is called a product-sum number: N = a1 + a2 + ... + ak = a1 × a2 × ... × ak.
For example, 6 = 1 + 2 + 3 = 1 × 2 × 3.
For a given set of size, k, we shall call the smallest N with this property a minimal product-sum number. The minimal product-sum numbers for sets of size, k = 2, 3, 4, 5, and 6 are as follows.
k=2: 4 = 2 × 2 = 2 + 2
k=3: 6 = 1 × 2 × 3 = 1 + 2 + 3
k=4: 8 = 1 × 1 × 2 × 4 = 1 + 1 + 2 + 4
k=5: 8 = 1 × 1 × 2 × 2 × 2 = 1 + 1 + 2 + 2 + 2
k=6: 12 = 1 × 1 × 1 × 1 × 2 × 6 = 1 + 1 + 1 + 1 + 2 + 6
Hence for 2≤k≤6, the sum of all the minimal product-sum numbers is 4+6+8+12 = 30; note that 8 is only counted once in the sum.
In fact, as the complete set of minimal product-sum numbers for 2≤k≤12 is {4, 6, 8, 12, 15, 16}, the sum is 61.
What is the sum of all the minimal product-sum numbers for 2≤k≤12000?
"""
#the smallest possible candidate is 1+1+...+1 = k, but that is not a product-sum for k>1
#also notice that 2*k*1*1*...*1 (k-2 ones) == 2k == 2 + k + 1+...+1 (k-2 ones)
#so our minimal product-sum satisfies k <= mps <= 2k, so we only need to search this range
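#worked example (illustrative): for k=5 the loop below scans j in 5..10 and stops at
#j=8, since 8 = 1*1*2*2*2 and 1+1+2+2+2 = 8, matching mps(5)=8 from the statement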
from functools import reduce, lru_cache
from operator import mul
from pe import prime_factors
def product(l):
return reduce(mul, l)
def is_ps(l):
return sum(l) == product(l)
@lru_cache(maxsize=None)
def c_prime_factors(n):
return prime_factors(n)
@lru_cache(maxsize=None)
def c_all_multiplicands(n):
return list(all_multiplicands(n))
@lru_cache(maxsize=None)
def all_multiplicands(n):
factors = c_prime_factors(n)
res = []
if len(factors)==1:
return [[n]]
ms = []
for i in range(len(factors)):
f = factors[i]
r = sorted(all_multiplicands(product(factors[:i]+factors[i+1:])))
if r not in res:
res.append(r)
for arr in r:
farr = sorted([f]+arr)
if farr not in ms:
ms.append(farr)
for j in range(len(arr)):
farr = sorted(arr[:j]+[f*arr[j]]+arr[j+1:])
if farr not in ms:
ms.append(farr)
return ms
def is_n_ps_for_k(n, k):
for candidate in all_multiplicands(n):
if len(candidate) > k:
continue #this shouldn't really happen
if(sum(candidate)+k-len(candidate))==n:
return True
return False
limit = 12000
s = set([])
for i in range(2,limit+1):
for j in range(i, 2*i+1):
if is_n_ps_for_k(j, i):
print([i, j])
s.add(j)
break
print(sum(s))
|
DanMayhem/project_euler
|
088.py
|
088.py
|
py
| 2,498 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.reduce",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "operator.mul",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "pe.prime_factors",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 43,
"usage_type": "call"
}
] |
30984637396
|
from datetime import datetime
from constants import ProducerTypes
from events.producers import get_producer
from events.utils import get_routing_key
from models import (
Task,
TaskCost,
)
from popug_schema_registry.models.v1.task_cost_added_event_schema import (
TaskCostAddedEventSchema,
)
def send_taskcost_added_event(taskcost: TaskCost, task: Task) -> None:
producer = get_producer(ProducerTypes.TASKCOSTS_BC)
event = TaskCostAddedEventSchema(
data={
"public_id": taskcost.public_id,
"task_public_id": task.public_id,
"debit_cost": taskcost.debit_cost,
"credit_cost": taskcost.credit_cost,
},
produced_at=datetime.utcnow(),
)
producer.publish_message(
event.json().encode("utf-8"),
get_routing_key(event.title, event.version),
)
|
Drozdetskiy/popug_jira
|
popug_accounting/src/events/taskcost/send_event.py
|
send_event.py
|
py
| 857 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "models.TaskCost",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.Task",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "events.producers.get_producer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "constants.ProducerTypes.TASKCOSTS_BC",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "constants.ProducerTypes",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "popug_schema_registry.models.v1.task_cost_added_event_schema.TaskCostAddedEventSchema",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "events.utils.get_routing_key",
"line_number": 29,
"usage_type": "call"
}
] |
10747823823
|
'''S3 uploader module'''
import os
import time
import signal
import sys
import boto3
# This module seems to have some issues. pylint ignore them
from setproctitle import setproctitle, getproctitle # pylint: disable=E0611
from kafkatos3.ThreadPool import ThreadPool
def upload_file(self, filename):
    '''horrible callback function outside the class because it needs to be picklable'''
self.upload_file_to_s3(filename)
class S3Uploader(object):
'''class for uploading files to s3'''
def __init__(self, config, logger):
'''constructor'''
self.config = config
self.logger = logger
self.pool = None
def upload_file_to_s3(self, filename):
'''upload file to s3'''
self.logger.info("Uploading file: " + filename + " to s3")
working_dir = self.config.get("main", "working_directory")
s3_key = "kafkatos3" + filename.replace(working_dir + "/tos3", "")
self.logger.info("S3 key is " + s3_key)
if self.config.get("s3", "s3_access_key") != "":
access_key = self.config.get("s3", "s3_access_key")
secret_key = self.config.get("s3", "s3_secret_key")
s3client = boto3.client("s3", aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
else:
s3client = boto3.client("s3")
bucket = self.config.get("s3", "s3_bucket_name")
s3client.upload_file(filename, bucket, s3_key)
os.remove(filename)
def run(self):
'''main executor'''
def cb_exit_gracefully(signum, frame):
'''callback to exit gracefully for a pool thread'''
self.logger.info("Shutting down S3Uploader, signum %d"% (signum))
sys.exit(0)
self.logger.info("S3Uploader process starting up")
self.pool = ThreadPool(int(self.config.get("s3", "s3uploader_workers")))
setproctitle("[s3upload] " + getproctitle())
signal.signal(signal.SIGINT, cb_exit_gracefully)
signal.signal(signal.SIGTERM, cb_exit_gracefully)
while True:
tos3_dir = os.path.join(self.config.get(
"main", "working_directory"), "tos3")
files = self.get_files(tos3_dir, ".gz")
self.pool.map(self.upload_file_to_s3, files)
time.sleep(float(self.config.get(
"s3", "s3upload_check_interval")))
sys.exit(0)
def get_files(self, directory, extension):
        ''' return a list of files in a directory recursively based on extension'''
file_list = []
for dirpath, _, files in os.walk(directory):
for filename in files:
fname = os.path.join(dirpath, filename)
filename, file_extension = os.path.splitext(fname)
if file_extension == extension:
file_list.append(fname)
return file_list
|
snowch/kafkatos3
|
kafkatos3/S3Uploader.py
|
S3Uploader.py
|
py
| 2,919 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "kafkatos3.ThreadPool.ThreadPool",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "setproctitle.setproctitle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "setproctitle.getproctitle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "signal.SIGTERM",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
}
] |
34690028943
|
from django.contrib import admin
from .models import Service, Category, Feature, FeatureItem
class FeatureItemInline(admin.StackedInline):
model = FeatureItem
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
list_display = ("name", "sub_title")
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name", "title", "body")
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = "name",
search_fields = 'name',
@admin.register(Feature)
class FeatureAdmin(admin.ModelAdmin):
list_display = ('name', 'header_subtitle', 'slug', 'features_list_title')
list_editable = ('header_subtitle', 'slug', 'features_list_title')
prepopulated_fields = {'slug': ('name', )}
search_fields = 'name', 'header_subtitle', 'header_description', 'body'
inlines = [FeatureItemInline]
|
samshultz/techbitsdata
|
services/admin.py
|
admin.py
|
py
| 855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.StackedInline",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.FeatureItem",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Service",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Category",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.Feature",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 22,
"usage_type": "name"
}
] |
32371281333
|
import cv2
import matplotlib.pyplot as plt
def plotImg(img):
if len(img.shape) == 2:
plt.imshow(img, cmap='gray')
plt.show()
else:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
t=0
img = cv2.imread('cv.png')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
binary_img = cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV, 131, 15)
plotImg(binary_img)
_, _, boxes, _ = cv2.connectedComponentsWithStats(binary_img)
# first box is the background
boxes = boxes[1:]
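# each stats row from connectedComponentsWithStats is [x, y, width, height, pixel_area],
# which is exactly what the loop below unpacks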
filtered_boxes = []
for x,y,w,h,pixels in boxes:
if h < 100 and w < 100 and h > 10 and w > 10:
filtered_boxes.append((x,y,w,h))
t=t+1
for x,y,w,h in filtered_boxes:
cv2.rectangle(img, (x,y), (x+w,y+h), (0,0,255),2)
print(t)
plotImg(img)
|
RisinPhoenix12/Computer-Vision
|
dots.py
|
dots.py
|
py
| 818 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.adaptiveThreshold",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.ADAPTIVE_THRESH_GAUSSIAN_C",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_BINARY_INV",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.connectedComponentsWithStats",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 27,
"usage_type": "call"
}
] |
34111450286
|
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
''' from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
] '''
from django.conf.urls import url,include
from django.contrib import admin
from . import view,mysql
from ctrl import views
from controller import index
from controller import gui
# included routes (mounted under 'user/' below)
other=[
url(r'^info/',view.info),
url(r'^psd/',view.psd),
]
urlpatterns = [
url(r'^$', view.hello),
url(r'^hi',view.hi),
url(r'^int/',view.myint),
url(r'^json/',view.json),
url(r'^res/',view.res),
url(r'^admin/', admin.site.urls),
url(r'^insert/',mysql.insert),
url(r'^find/',mysql.find),
url(r'^update/',mysql.update),
url(r'^del/',mysql.delete),
url(r'^user/',include(other)),
url(r'^cc/',views.cc),
url(r'^index/',index.index),
url(r'^gui/',gui.index),
    url(r'^controller/',include('controller.urls')), # use a sub-URLconf
]
|
githubrghd/mydemo
|
python-demo/helloworld/helloworld/urls.py
|
urls.py
|
py
| 1,551 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ctrl.views.cc",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "ctrl.views",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "controller.index.index",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "controller.index",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "controller.gui.index",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "controller.gui",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 52,
"usage_type": "call"
}
] |
6589861302
|
from elasticsearch import Elasticsearch
import pandas as pd
from contexttimer import Timer
es = Elasticsearch(
"http://rgai3.inf.u-szeged.hu:3427/",
basic_auth=("elastic", "V7uek_ey6EdQbGBz_XHX"),
verify_certs=False
)
def get_highlights(csv, es, size):
    # extract the data from the DataFrame, using the already-cleaned questions
df = pd.read_csv(csv,
names=['question', 'context'],
header=1,
encoding='utf-8')
milqa_contexts_dict = dict()
for index, record in df.iterrows():
clean_question = record['question']
context = record['context']
if clean_question not in milqa_contexts_dict:
milqa_contexts_dict[clean_question] = context
else:
pass
with Timer() as t:
result_dict = dict()
error_counter = 0
id = 0
match_len = 0
all_context = list()
all_question = list()
for key, value in milqa_contexts_dict.items():
question = key
official_context = value.split("|||")[1]
# query top 10 guesses
body = {
"size": size,
"query": {
"match": {
"document": question
}
}
}
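            # a standard full-text "match" query against the "document" field;
            # "size" caps how many candidate contexts Elasticsearch returns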
s = es.search(index='milqa_w_lemma_w_official_context', body=body)
result_contexts = list()
result_official_contexts = list()
for hit in s['hits']['hits']:
result_contexts.append(hit["_source"]["document"])
result_official_contexts.append(hit["_source"]["official_document"])
# error_dict = dict()
result_official_contexts_set = set(single_context for single_context in result_official_contexts)
if official_context in result_official_contexts_set:
match_counter = 1
result_number = 0
for result_official_context in result_official_contexts:
if result_official_context == official_context:
result_number = match_counter
break
else:
match_counter += 1
match_len += 1
all_context.append(value)
all_question.append(key)
else:
error_counter += 1
                result_number = 'not found'
all_context.append(value)
all_question.append(key)
if isinstance(result_number, str):
result_dict[id] = result_number
else:
result_dict[id] = (1 / int(result_number))
id += 1
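        # MRR below is the mean of reciprocal ranks over all questions; miss entries
        # are strings, so they add nothing to the sum but still count in the denominator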
summary = 0.0
error_counter_check = 0
summary_counter = 0
number: float
for key, number in result_dict.items():
if isinstance(number, float):
summary += number
summary_counter += 1
if isinstance(number, str):
error_counter_check += 1
print("összes eltalát eset " + str(size) + " size mérettel: " + str(summary_counter))
print("összes eset " + str(size) + " size mérettel: " + str(len(milqa_contexts_dict)))
print("összes vizsgált számon kívüli eset " + str(size) + " size mérettel: " + str(error_counter_check))
print("összes eltalált/összes eset (Precision@k): " + str(summary_counter / len(milqa_contexts_dict)))
print("MRR: " + str(summary / len(milqa_contexts_dict)) + " | error counter: " + str(
error_counter)) # + "\n" + str(result_dict))# + "\n" + all_context[2] + "\n" + all_question[2])
print(f"Time spent: {t.elapsed:.2f} seconds")
return 0
if __name__ == '__main__':
csv = 'q_wPoS_wLemma_c_wLemma_c_wOfficial.csv'
# csv = 'q_wLemma_c_wLemma_c_wOfficial.csv'
print(get_highlights(csv, es, 300))
# posLemma: 12769 lemma: 12845
# 1 pos lemma:
# total hits with size 1: 8393
# total cases with size 1: 12769
# total cases not found in top-k with size 1: 4376
# hits/total cases (Precision@k): 0.657295011355627
# MRR: 0.657295011355627 | error counter: 4376
# Time spent: 75.06 seconds
#
# 300 pos lemma:
# total hits with size 300: 12559
# total cases with size 300: 12769
# total cases not found in top-k with size 300: 210
# hits/total cases (Precision@k): 0.9835539196491503
# MRR: 0.7494510958150116 | error counter: 210
# Time spent: 480.42 seconds
#
# 300 lemma:
# total hits with size 300: 12638
# total cases with size 300: 12845
# total cases not found in top-k with size 300: 207
# hits/total cases (Precision@k): 0.9838847800700662
# MRR: 0.7403596956400766 | error counter: 207
# Time spent: 599.05 seconds
#
# 1 lemma:
# total hits with size 1: 8315
# total cases with size 1: 12845
# total cases not found in top-k with size 1: 4530
# hits/total cases (Precision@k): 0.64733359283768
# MRR: 0.64733359283768 | error counter: 4530
# Time spent: 80.92 seconds
|
szegedai/SHunQA
|
scripts/evals/highlights_score_test_w_preprocessed_questions.py
|
highlights_score_test_w_preprocessed_questions.py
|
py
| 5,241 |
python
|
hu
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "contexttimer.Timer",
"line_number": 31,
"usage_type": "call"
}
] |
7227006625
|
import tqdm
import argparse
import os
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--src-set', '-src-set', type=str, default=r'/home/v-jiaya/RetrieveNMT/data/MD/en-de/iwslt14-en-de/train/train.en',help='source file')
    parser.add_argument('--new-src-set', '-new-src-set', type=str, default=r'/home/v-jiaya/fast_align/data/test.en-de', help='output file')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
with open(args.src_set,"r", encoding="utf-8") as src_r:
with open(args.new_src_set, "w", encoding="utf-8") as new_src_w:
print("reading source data file: {}".format(args.src_set))
src_lines = src_r.readlines()
for line_id, src_line in tqdm.tqdm(enumerate(src_lines)):
src_line=src_line.strip()
concat_lines = src_line.split(" [APPEND] ")[1].split(" [SRC] ")
for item in concat_lines:
src, tgt = item.split(" [TGT] ")
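                    # fast_align expects one "source ||| target" pair per line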
new_src_w.write("{} ||| {}\n".format(src, tgt))
new_src_w.flush()
|
CSJianYang/RetrieveNMT
|
RetrieveNMT/SMT/generate_align_data.py
|
generate_align_data.py
|
py
| 1,178 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 19,
"usage_type": "call"
}
] |
28326166820
|
"""
OCR Pagination
"""
from past.utils import old_div
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from django.conf import settings
from ocr.permission import get_permissions
# -------------------------------------------------------------------------------
# pylint: disable=too-many-ancestors
# pylint: disable=no-member
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-locals
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# -------------------------------------------------------------------------------
class CustomOCRPagination(PageNumberPagination):
"""
OCR Pagination
"""
def __init__(self):
self.query_set = None
self.request = None
self.view = None
self.list_serializer = None
def modified_get_paginate_response(self, page):
"""
Desc:
"""
try:
page_number = int(self.request.query_params.get('page_number', settings.PAGENUMBER))
except ValueError:
page_number = settings.PAGENUMBER
try:
page_size = int(self.request.query_params.get('page_size', settings.PAGESIZE))
except ValueError:
page_size = settings.PAGESIZE
pagination = self.get_page_count(page, page_number, page_size)
permission_details = get_permissions(user=self.request.user,
model=self.list_serializer.Meta.model.__name__.lower(),
type='list')
return Response({
'data': pagination["current_data"],
'total_number_of_pages': pagination['count'],
'current_page': pagination['current_page'],
'current_page_size': pagination['current_page_size'],
'current_item_count': len(pagination["current_data"]),
'total_data_count': pagination['total_data_count'],
'permission_details': permission_details
})
def get_page_count(self, page, page_number=1, page_size=10):
"""
Desc:
"""
if page_size < 1:
page_size = 1
total_data_count = len(page)
if total_data_count < 1:
return {
"count": 0,
"current_page": 0,
"current_page_size": 0,
"total_data_count": total_data_count,
"current_data": []
}
total_number_of_pages = (old_div((total_data_count - 1), page_size)) + 1
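        # ceiling division: e.g. 25 items with page_size 10 -> (24 // 10) + 1 = 3 pages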
if page_number > total_number_of_pages:
page_number = 1
page_size = 10
initial_count = (page_number - 1) * page_size
end_count = initial_count + page_size
page_data = page[initial_count:end_count]
serialized_page_data = self.list_serializer(page_data, many=True,
context={"request": self.request})
data = [i for i in serialized_page_data.data if i]
total_data_count = len(data)
# pylint: disable= line-too-long
return {
"count": total_number_of_pages,
"current_page": page_number,
"current_page_size": page_size,
"current_data": data,
"total_data_count": total_data_count
}
def paginate_queryset(self, queryset, request, view=None, list_serializer=None):
"""
Desc:
"""
self.request = request
self.view = view
self.query_set = queryset
self.list_serializer = list_serializer
return self.query_set
|
Srinidhi-SA/temp_spark
|
SPARK_DOCKER/code/mAdvisor-api/ocr/pagination.py
|
pagination.py
|
py
| 3,723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.pagination.PageNumberPagination",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGENUMBER",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGENUMBER",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGESIZE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PAGESIZE",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "ocr.permission.get_permissions",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "past.utils.old_div",
"line_number": 74,
"usage_type": "call"
}
] |
17689672862
|
import torch
from torch.nn import Module, Conv2d, LeakyReLU, PReLU, BatchNorm2d, Sequential, PixelShuffle, AdaptiveAvgPool2d, Flatten, Linear, Dropout2d, Dropout
class ResidualUnit(Module):
def __init__(self):
super(ResidualUnit, self).__init__()
self.conv1 = Sequential(Conv2d(64, 64, 3, 1, "same"), BatchNorm2d(64), PReLU(64))
self.conv2 = Sequential(Conv2d(64, 64, 3, 1, "same"), BatchNorm2d(64))
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
return input + out
class UpsampleUnit(Module):
def __init__(self):
super(UpsampleUnit, self).__init__()
self.conv = Conv2d(64, 256, 3, 1, "same")
self.shuffle = PixelShuffle(2)
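        # PixelShuffle(2) rearranges (256, H, W) -> (64, 2H, 2W): channel count drops
        # by r^2 = 4 while spatial resolution doubles (sub-pixel convolution upsampling)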
self.activation = PReLU(64)
def forward(self, X):
return self.activation(self.shuffle(self.conv(X)))
class Generator(Module):
def __init__(self, no_resBlocks):
super(Generator, self).__init__()
        self.residuals = Sequential(*[ResidualUnit() for _ in range(no_resBlocks)])  # fresh instances; `[m] * n` would reuse one shared block
self.upsample = Sequential(UpsampleUnit(), UpsampleUnit())
self.initialConv = Sequential(Conv2d(3, 64, 9, 1, "same"), PReLU(64))
self.midConv = Sequential(Conv2d(64, 64, 3, 1, "same"), BatchNorm2d(64))
self.finalConv = Conv2d(64, 3, 9, 1, "same")
def forward(self, input):
input = self.initialConv(input)
out = self.residuals(input)
out = self.midConv(out)
out = out + input
out = self.upsample(out)
out = self.finalConv(out)
return torch.tanh(out)
class DiscConvBlock(Module):
def __init__(self, in_channels, out_channels, stride):
super(DiscConvBlock, self).__init__()
self.conv = Conv2d(in_channels, out_channels, 3, stride, 1)
self.bn = BatchNorm2d(out_channels)
self.activation = LeakyReLU(0.2)
self.dropout = Dropout2d(p=0.50)
def forward(self, X):
return self.dropout(self.activation(self.bn(self.conv(X))))
class Discriminator(Module):
def __init__(self):
super(Discriminator, self).__init__()
self.initial_conv = Sequential(
Conv2d(3, 64, 3, 1, "same"),
LeakyReLU(0.2),
Dropout2d(p=0.5)
)
self.conv_seq = Sequential(
DiscConvBlock(64, 64, 2),
DiscConvBlock(64, 128, 1),
DiscConvBlock(128, 128, 2),
DiscConvBlock(128, 256, 1),
DiscConvBlock(256, 256, 2),
DiscConvBlock(256, 512, 1),
DiscConvBlock(512, 512, 2),
AdaptiveAvgPool2d(1),
Flatten()
)
self.fc = Sequential(
Linear(512, 1024),
LeakyReLU(0.2),
Dropout(0.50),
Linear(1024, 1)
)
def forward(self, X):
return self.fc(self.conv_seq(self.initial_conv(X)))
|
abed11326/Training-a-Super-Resolution-GAN-for-4x-image-upscaling
|
models.py
|
models.py
|
py
| 2,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn.PReLU",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.PixelShuffle",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.PReLU",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.PReLU",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout2d",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout2d",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn.Flatten",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 79,
"usage_type": "call"
}
] |
34351667294
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('CCLhora2.csv')
data1 = pd.read_csv('CCLdia2.csv')
data2 = pd.read_csv('CCLsemana2.csv')
data3 = pd.read_csv('CCLmes2.csv')
data4 = pd.read_csv('CCLaño2.csv')
data["date"] = pd.to_datetime(data["date"], unit='ms')
data1["date"] = pd.to_datetime(data1["date"], unit='ms')
data2["date"] = pd.to_datetime(data2["date"], unit='ms')
data3["date"] = pd.to_datetime(data3["date"], unit='ms')
data4["date"] = pd.to_datetime(data4["date"], unit='ms')
plt.rcParams["figure.figsize"] = (12,12)
plt.plot(data['date'], data['tamaño'])
plt.plot(data1['date'], data1['tamaño'])
plt.plot(data2['date'], data2['tamaño'])
plt.plot(data3['date'], data3['tamaño'])
plt.plot(data4['date'], data4['tamaño'])
plt.ylabel('Weight of the LCC-')
plt.xlabel('Time')
plt.savefig('fgtodosc2.png')
|
pedrolf8/MastodonTFG
|
paso.py
|
paso.py
|
py
| 869 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
}
] |
20855466611
|
import requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
import os
from decouple import config
from prettyprinter import pprint
import GUI
# For the API documentation go to
# https://coinmarketcap.com/api/documentation/v1/#section/Quick-Start-Guide
API_KEY = config("API_KEY")
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
parameters = {
'start': '1',
'limit': '50',
'convert': 'USD',
"CMC_PRO_API_KEY": API_KEY,
}
my_portfolio = [
{
"symbol": "BTC",
"amount_owned": 2000,
"price_payed_per_unit": 20.0
},
{
"symbol": "ETH",
"amount_owned": 500,
"price_payed_per_unit": 2.0
},
{
"symbol": "XPR",
"amount_owned": 1500,
"price_payed_per_unit": 0.1
},
{
"symbol": "XLM",
"amount_owned": 2000,
"price_payed_per_unit": 0.2
},
{
"symbol": "EOS",
"amount_owned": 1000,
"price_payed_per_unit": 2.0
},
]
def get_api_info():
api_request = requests.get(url=url, params=parameters)
data = json.loads(api_request.content)
return data["data"]
def add_coin_msg_to_gui(coin_msg):
coin_frame = GUI.Frame(GUI.second_frame,
relief='ridge',
borderwidth=2,
bg='#F2F2F2')
coin_frame.pack(
anchor='center',
pady=(10, 0),
padx=(50, 50),
fill='x'
)
display_coin_msg = GUI.Label(coin_frame,
text=coin_msg,
anchor='w',
font=('Time New Roman', '13', 'bold underline'),
bg='#F2F2F2')
display_coin_msg.pack(fill='x',
padx=10)
def show_portfolio_profit_loss_on_gui(profit_loos):
    # show the portfolio's total profit/loss at the edge of the window
if profit_loos > 0:
color = "green"
else:
color = "red"
portfolio_profit_loos_label = GUI.Label(GUI.root,
text="Portfolio Total Profit/Loos: ${0:.2f}".format(profit_loos),
font=('Time New Roman', '9', 'bold'),
bg="white",
fg=color)
portfolio_profit_loos_label.pack(pady=(5, 0),
anchor='e')
def format_data():
try:
coins = get_api_info()
portfolio_profit_loos = 0
for coin in coins:
for sym in my_portfolio:
if sym["symbol"] == coin["symbol"]:
total_paid = sym["price_payed_per_unit"] * sym["amount_owned"]
total_current_value = sym["amount_owned"] * float(coin["quote"]["USD"]["price"])
profit = total_current_value - total_paid
profit_percentage = profit / total_paid * 100
profit_per_coin = float(coin["quote"]["USD"]["price"]) - sym["price_payed_per_unit"]
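                    # illustrative numbers: 2000 units bought at $20 cost $40,000; at a
                    # current price of $30 they are worth $60,000, so profit = $20,000,
                    # profit_percentage = 50% and profit_per_coin = $10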
my_coin_msg = f'Name: {coin["name"]} \n' \
f'Symbol: {coin["symbol"]} \n' \
f'Rank: {coin["cmc_rank"]} \n' \
f'Current Price: ${float(coin["quote"]["USD"]["price"]):.2f} \n' \
f'24 Hour Change: {float(coin["quote"]["USD"]["percent_change_24h"]):.2f}% \n' \
f'Paid per coin: ${sym["price_payed_per_unit"]:.2f} \n' \
f'Amount Owned: {sym["amount_owned"]} units \n' \
f'Total current value: ${total_current_value:.2f} \n' \
f'Total Paid: ${total_paid:.2f} \n' \
f'Profit/Loss per coin:${profit_per_coin:.2f} \n' \
f'Profit/Loss: ${profit:.2f} \n' \
f'Profit/Loss percentage: {profit_percentage:.2f}%'
                    portfolio_profit_loss += profit
add_coin_msg_to_gui(my_coin_msg)
        show_portfolio_profit_loss_on_gui(portfolio_profit_loss)
except (ConnectionError, Timeout, TooManyRedirects) as e:
pprint(e)
if __name__ == "__main__":
# Clear command line window
os.system('cls')
format_data()
GUI.root.mainloop()
|
edumarg/cyrpto_currency_portfolio
|
main.py
|
main.py
|
py
| 4,492 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "decouple.config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "GUI.Frame",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "GUI.second_frame",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "GUI.Label",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "GUI.Label",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "GUI.root",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions.ConnectionError",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "requests.exceptions.Timeout",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "requests.exceptions.TooManyRedirects",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "prettyprinter.pprint",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "GUI.root.mainloop",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "GUI.root",
"line_number": 130,
"usage_type": "attribute"
}
] |
17195304707
|
#threading better due to network I/O hindrances
#from threading import Thread
#multiprocessing used for cpu intensive processes (no networking hindrances)
#(a minimal thread-based sketch follows the imports below)
from multiprocessing import Process, Queue
from time import time
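# A thread-based variant, as referenced above (a sketch, not part of the
# original script): threading.Thread exposes the same target/args interface,
# so only the worker class changes for I/O-bound workloads.
#   from threading import Thread
#   t = Thread(target=check_value_in_list, args=(comparison_list, i, num_processes, queue))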
def check_value_in_list(x, j, num_of_processes, queue):
max_num_to_check = 10**8
lower_bnd = int((j * max_num_to_check)/num_of_processes)
upper_bnd = int(((j + 1) * max_num_to_check)/num_of_processes)
num_of_hits = 0
for i in range(lower_bnd, upper_bnd):
if i in x:
num_of_hits += 1
queue.put((lower_bnd, upper_bnd, num_of_hits))
#num_threads = 4
def run():
comparison_list = [1,2,3]
num_processes = 4
queue = Queue()
processes = []
for i in range(num_processes):
t = Process(target=check_value_in_list, args=(comparison_list, i, num_processes, queue))
processes.append(t)
for t in processes:
t.start()
for t in processes:
t.join()
queue.put("DONE")
while True:
v = queue.get()
if v == "DONE":
break
lower, upper, num_of_hits = v
print("Between", lower, "and", upper, "we have", num_of_hits, "values in the list")
if __name__ == "__main__":
start_time = time()
run()
print("Script time:", time() - start_time, " seconds")
|
ganton000/Concurrency
|
multiprocessing-tutorial/main.py
|
main.py
|
py
| 1,200 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "multiprocessing.Queue",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
}
] |
44730303411
|
import os
import discord
from discord.utils import get
from discord.ext import commands, tasks
from dotenv import load_dotenv
import random
import re
import time
import requests
import asyncio  # needed for the asyncio.TimeoutError caught in the movie command
load_dotenv()
OMDB_KEY = os.getenv('OMDB_KEY')
STREAMING_KEY = os.getenv('STREAMING_KEY')
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
image_types = ["png", "jpeg", "gif", "jpg", "mp4", "mov"]
client = discord.Client(command_prefix = 'sponge', intents = discord.Intents.all())
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix = '!', intents = intents)
@bot.event
async def on_ready():
print(f'logged in as {bot.user.name}')
@bot.command(name='shit')
async def random_image(ctx, source_channel_id = 815464674404990988, source_server_id = 735280881664655410, target_server_id = 718995069847470141, target_channel_id = 718995070316970046):
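    # Repost a random image attachment from the source channel into the target
    # channel, crediting the original sender; the default IDs point at specific
    # hard-coded servers/channels.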
try:
source_server = discord.utils.get(bot.guilds, id = source_server_id)
target_server = discord.utils.get(bot.guilds, id = target_server_id)
source_channel = discord.utils.get(source_server.channels, id = source_channel_id)
target_channel = discord.utils.get(target_server.channels, id = target_channel_id)
messages = []
async for message in source_channel.history(limit = 500):
messages.append(message)
image_messages = [message for message in messages if message.attachments]
if image_messages:
random_message = random.choice(image_messages)
attachment = random_message.attachments[0]
image_url = attachment.url
sender = random_message.author
await target_channel.send(image_url)
await target_channel.send(f'courtesy of {sender}')
else:
await ctx.send('no image found')
except discord.NotFound:
await ctx.send('source image not found')
@bot.command(name = 'problem')
async def ask(ctx):
await ctx.send('https://tenor.com/view/whattheproblemis-martin-lawrence-nationalsecurity-gif-27064298')
@bot.command(name = 'movie')
async def ask(ctx):
await ctx.send('What movie would you like to watch?')
def check(message):
return message.author == ctx.author and message.channel == ctx.channel
try:
user_response = await bot.wait_for('message', check=check, timeout=10.0)
await ctx.send(f'you want to watch: {user_response.content}')
response = requests.get(f'http://www.omdbapi.com/?t={user_response.content}&apikey={OMDB_KEY}')
imdb_id = response.json()['imdbID']
imdb_title = response.json()['Title']
imdb_plot = response.json()['Plot']
url = "https://streaming-availability.p.rapidapi.com/v2/get/basic"
querystring = {"country":"us",f"imdb_id":f"{imdb_id}","output_language":"en"}
headers = {
"X-RapidAPI-Key": "e145409a39mshf509ba14a206131p1acb3ejsnaeb0f27c7eb9",
"X-RapidAPI-Host": "streaming-availability.p.rapidapi.com"
}
response = requests.get(url, headers=headers, params=querystring)
example_json = response.json()
streaming_info = example_json['result']['streamingInfo']['us']
for i in streaming_info:
quality = streaming_info[i][0]['quality']
type_of_stream = streaming_info[i][0]['type']
link = streaming_info[i][0]['link']
await ctx.send(f'you can {type_of_stream} {imdb_title} on {i} in {quality} \n here is your link: {link}' )
await ctx.send(f' \n \n the plot is: {imdb_plot}')
except asyncio.TimeoutError:
await ctx.send('timeout. you did not send a response quick enough')
bot.run(TOKEN)
# @client.event
# async def on_ready():
# guild = discord.utils.get(client.guilds, name=GUILD)
# print(f'{client.user} has connected to the following guild:\n'
# f'{guild.name}(id: {guild.id})')
# @client.event
# async def on_message(message):
# # if the message is by the bot break the function - this stops endless loops
# if message.author == client.user:
# return
# # sends spongebob-text message
# if len(message.content) > 35:
# response = [x for x in message.content]
# for i in range(len(message.content)):
# upper_lower = random.randint(0,1)
# if upper_lower == 1:
# response[i] = message.content[i]
# elif upper_lower == 0:
# response[i] = message.content[i].upper()
# await message.channel.send(''.join(response))
# # looks for /d and number to roll random number generator
# if '/d' in message.content:
# num = re.search('(?<=\/d).[0-9]+', message.content)
# if num.group(0).isnumeric():
# string = f'{str(random.randrange(1,int(num.group(0))+1))}'
# await message.channel.send(string)
# else:
# await message.channel.send('thats not a number, try again')
# # responds with emoji
# if 'wz' in message.content.lower() or 'warzone' in message.content.lower() or 'cod' in message.content.lower() or 'call of duty' in message.content.lower():
# emoji = client.get_emoji(955552719379251300)
# await message.add_reaction(emoji)
# elif 'shot' in message.content.lower():
# emoji = client.get_emoji(951262317482479667)
# await message.add_reaction(emoji)
# for attachment in message.attachments:
# if any(attachment.filename.lower().endswith(image) for image in image_types):
# await attachment.save(f'attachments/{attachment.filename}')
# client.run(TOKEN)
|
aburpee/spongebob-text
|
app.py
|
app.py
|
py
| 5,748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.Client",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "discord.Intents.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "discord.Intents.default",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "discord.utils.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "discord.utils.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "discord.utils.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "discord.utils.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.NotFound",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 85,
"usage_type": "call"
}
] |
43633237973
|
from __future__ import absolute_import
#typing
import numpy
#overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy
class SimpleTagger(Model):
u"""
This ``SimpleTagger`` simply encodes a sequence of text with a stacked ``Seq2SeqEncoder``, then
predicts a tag for each token in the sequence.
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab ,
text_field_embedder ,
encoder ,
initializer = InitializerApplicator(),
regularizer = None) :
super(SimpleTagger, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size(u"labels")
self.encoder = encoder
self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
self.num_classes))
check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
u"text field embedding dim", u"encoder input dim")
self.metrics = {
u"accuracy": CategoricalAccuracy(),
u"accuracy3": CategoricalAccuracy(top_k=3)
}
initializer(self)
#overrides
def forward(self, # type: ignore
tokens ,
tags = None,
metadata = None) :
# pylint: disable=arguments-differ
u"""
Parameters
----------
tokens : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
tags : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels of shape
``(batch_size, num_tokens)``.
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
        metadata containing the original words in the sentence to be tagged under a 'words' key.
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
unnormalised log probabilities of the tag classes.
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
a distribution of the tag classes per word.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
batch_size, sequence_length, _ = embedded_text_input.size()
mask = get_text_field_mask(tokens)
encoded_text = self.encoder(embedded_text_input, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view([batch_size,
sequence_length,
self.num_classes])
output_dict = {u"logits": logits, u"class_probabilities": class_probabilities}
if tags is not None:
loss = sequence_cross_entropy_with_logits(logits, tags, mask)
for metric in list(self.metrics.values()):
metric(logits, tags, mask.float())
output_dict[u"loss"] = loss
if metadata is not None:
output_dict[u"words"] = [x[u"words"] for x in metadata]
return output_dict
#overrides
def decode(self, output_dict ) :
u"""
Does a simple position-wise argmax over each token, converts indices to string labels, and
adds a ``"tags"`` key to the dictionary with the result.
"""
all_predictions = output_dict[u'class_probabilities']
all_predictions = all_predictions.cpu().data.numpy()
if all_predictions.ndim == 3:
predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]
else:
predictions_list = [all_predictions]
all_tags = []
for predictions in predictions_list:
argmax_indices = numpy.argmax(predictions, axis=-1)
tags = [self.vocab.get_token_from_index(x, namespace=u"labels")
for x in argmax_indices]
all_tags.append(tags)
output_dict[u'tags'] = all_tags
return output_dict
#overrides
def get_metrics(self, reset = False) :
return dict((metric_name, metric.get_metric(reset)) for metric_name, metric in list(self.metrics.items()))
SimpleTagger = Model.register(u"simple_tagger")(SimpleTagger)
|
plasticityai/magnitude
|
pymagnitude/third_party/allennlp/models/simple_tagger.py
|
simple_tagger.py
|
py
| 6,916 |
python
|
en
|
code
| 1,607 |
github-code
|
6
|
[
{
"api_name": "allennlp.models.model.Model",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "allennlp.nn.InitializerApplicator",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "allennlp.modules.TimeDistributed",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.modules.linear.Linear",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "allennlp.common.checks.check_dimensions_match",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "allennlp.training.metrics.CategoricalAccuracy",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "allennlp.training.metrics.CategoricalAccuracy",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "allennlp.nn.util.get_text_field_mask",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "allennlp.nn.util.sequence_cross_entropy_with_logits",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "allennlp.models.model.Model.register",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "allennlp.models.model.Model",
"line_number": 147,
"usage_type": "name"
}
] |
11002868168
|
from typing import List
class WordFilter:
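    # Build a lookup of every "prefix$suffix" combination for each word, mapped
    # to the word's index (later duplicates overwrite earlier ones, so the
    # largest index wins); f() then reduces to a single O(1) dict lookup.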
def __init__(self, words: List[str]):
self.a ={}
for ind, i in enumerate(words):
for j in range(len(i) + 1):
for k in range(len(i) + 1):
now = i[:j] + '$' + i[k:]
self.a[now] = ind
def f(self, prefix: str, suffix: str) -> int:
k = prefix + '$' + suffix
return self.a.get(k, -1)
if __name__ == '__main__':
words = ["apple"]
prefix = "a"
suffix = "e"
obj = WordFilter(words)
print(obj.f(prefix, suffix))
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
|
xixihaha1995/CS61B_SP19_SP20
|
745. Prefix and Suffix Search.py
|
745. Prefix and Suffix Search.py
|
py
| 702 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
}
] |
74087520829
|
# -*- coding: utf-8 -*-
import re
import datetime
import bs4
import scrapy
from scrapy_wssc.Item.BookContentItem import BookContentItem
from scrapy_wssc.Item.BookItem import BookItem
class mobile_spider(scrapy.Spider):
name = 'mobile_spider'
def __init__(self, bid=None):
"""初始化起始页面和游戏bid
"""
super(mobile_spider, self).__init__()
self.bid = bid # 参数bid由此传入
self.start_urls = ['https://m.qu.la/wapsort/4_1.html'] #历史小说 1
#'https://www.qu.la/xuanhuanxiaoshuo/',#玄幻小说 2
# 'https://www.qu.la/dushixiaoshuo/'] #都市小说 3
self.allowed_domain = 'm.qu.la'
#self.driver = webdriver.Chrome(
# executable_path="C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe")
#self.driver.set_page_load_timeout(10) # throw a TimeoutException when thepage load time is more than 5 seconds.
#self.bookService = BookServiceImpl()
def parse(self, response):
pattern = re.compile(r'\d+')
book_list = response.xpath('//div[@class="recommend"]/div[@id="main"]/div')
for li in book_list:
            bookItem = BookItem()
bookItem['id'] = pattern.search(li.xpath('a/@href').extract()[0]).group()
bookItem['cateId'] = 1
bookItem['name'] = li.xpath('a/p[@class="title"]/text()').extract()[0].strip()
bookItem['author'] = li.xpath('a/p[@class="author"]/text()').extract()[0].split(u':')[1]
bookItem['isHot'] = True
bookItem['isSerial'] = True
bookItem['status'] = 1
bookItem['lastUpdate'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
bookItem['describe'] = li.xpath('p[@class="review"]/text()').extract()[1].split(u':')[1].strip()
bookItem['bookUrl'] = 'https://m.qu.la'+li.xpath('a/@href').extract()[0]
bookItem['create_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
yield bookItem
            # crawl chapters
yield scrapy.Request(url='https://m.qu.la/booklist/'+bookItem['id']+'.html',callback=self.get_book_chapter_list, meta={"cateId":1})
def get_book_chapter_list(self,response):
soup = bs4.BeautifulSoup(response.text, 'lxml')
chapterList = soup.find('div',id="chapterlist").p
for chapter in chapterList:
            pass  # chapter parsing not implemented yet
def get_book_info(self, response):
pattern = re.compile(r'\d+')
soup = bs4.BeautifulSoup(response.text, 'lxml')
        bookItem = BookItem()
bookItem['id'] = pattern.search(soup.find('div',id="info").find('a',{"style":"color:red;"}).attrs['href']).group()
bookItem['cateId'] = response.meta['cateId']
bookItem['name'] = soup.find('div',id="info").h1.get_text()
bookItem['author'] = soup.find('div',id="info").p.get_text().split(u':' )[1]
bookItem['isHot'] = True
bookItem['isSerial'] = True
bookItem['status'] = 1
bookItem['lastUpdate'] = soup.find('div',id="info").find_all('p')[2].get_text().split(u':' )[1]
bookItem['describe'] = soup.find('div',id="intro").get_text().replace(" ", "")
bookItem['bookUrl'] = response.request.url
bookItem['create_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
yield bookItem
book_content_list = response.xpath('//div[@id="list"]/dl/dd')
for con_li in book_content_list:
con_url = con_li.xpath('a/attribute::href').extract()[0]
if con_url.startswith('/book/'):
yield scrapy.Request(url='https://www.qu.la' + con_url, callback=self.get_book_content,
meta={'url': 'https://www.qu.la' + con_url, "bookId": bookItem["id"]})
def get_book_content(self,response):
pattern = re.compile(r'^(https://www.qu.la/.*?)(\d+)(.html)$')
soup = bs4.BeautifulSoup(response.text, 'lxml')
        bookContentItem = BookContentItem()
bookContentItem['id'] = pattern.search(response.meta['url']).group(2)
bookContentItem['bookId'] = response.meta['bookId']
bookContentItem['title'] = soup.find('div',attrs={"class":"bookname"}).h1.get_text()
bookContentItem['content'] = soup.find('div',id="content").get_text()
bookContentItem['linkUrl'] = response.meta['url']
bookContentItem['createDate'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
yield bookContentItem
|
chenrunhu/wssc_scrapy
|
scrapy_wssc/spiders/mobile_spider.py
|
mobile_spider.py
|
py
| 4,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "scrapy.Spider",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scrapy_wssc.Item.BookItem.BookItem",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "scrapy_wssc.Item.BookItem.BookItem",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "scrapy_wssc.Item.BookContentItem.BookContentItem",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 95,
"usage_type": "attribute"
}
] |
6056727556
|
#!/usr/bin/env python3
import re
import collections
import vcf
import sys
import argparse
import copy
def parse_my_args():
    parser = argparse.ArgumentParser(description="Combines a VCF of individual calls into one large VCF for the population.")
parser.add_argument("vcf", nargs="?", help="Input VCF file; default stdin.")
parser.add_argument("-n", "--name", help="name of combined population.", required=True)
args = parser.parse_args()
return(args)
def get_arg_vars(args):
if args.vcf:
inconn = open(args.vcf, "r")
else:
inconn = sys.stdin
name = args.name
return(inconn, name)
def combine_vcf(vcfin, name, outwriter):
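    # Pool per-sample genotypes into a single synthetic sample: count REF (ad1)
    # and ALT (ad2) allele observations across all calls (a haploid call counts
    # twice), then emit a combined GT of "0", "1", or "0|1".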
regex = re.compile(r"[/|]")
for record in vcfin:
calls = [call for call in record]
ad1 = 0
ad2 = 0
for call in calls:
callgt = regex.split(call["GT"])
if len(callgt) == 1:
inc = 2
else:
inc = 1
for i in callgt:
if i=="0":
ad1 += inc
if i=="1":
ad2 += inc
if ad1 >0 and ad2 > 0:
gt = "0|1"
elif ad2 > 0:
gt = "1"
else:
gt = "0"
writeout(gt, ad1, ad2, name, record, outwriter)
#print(cmh.summary())
#control_ad0, control_ad1 = [calls.AD[:2] for i in control]
#test_ad0, test_ad1 = [calls.AD[:2] for i in test]
def writeout(gt, ad1, ad2, name, record, writer):
newrecord = copy.deepcopy(record)
newrecord.samples = []
CallData = collections.namedtuple("CallData", ["GT", "AD"])
mycalldat = CallData(GT = str(gt), AD = [str(ad1), str(ad2)])
newrecord.samples.append(vcf.model._Call(newrecord, name, mycalldat))
writer.write_record(newrecord)
def main():
args = parse_my_args()
inconn, name = get_arg_vars(args)
vcfin = vcf.Reader(inconn)
outwriter = vcf.Writer(sys.stdout, vcfin)
combine_vcf(vcfin, name, outwriter)
inconn.close()
if __name__ == "__main__":
main()
#>>> for i in b:
#... for j in i:
#... try:print(j.data.AD)
|
jgbaldwinbrown/vcfstats
|
combine_single_indivs.py
|
combine_single_indivs.py
|
py
| 2,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "vcf.model._Call",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "vcf.model",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "vcf.Reader",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "vcf.Writer",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 69,
"usage_type": "attribute"
}
] |
23588347895
|
from time import sleep
import psycopg2
import os
import subprocess
from datetime import datetime
from pytz import timezone
import filecmp
db_name = os.environ['POSTGRES_DB']
db_user = os.environ['POSTGRES_USER']
db_pass = os.environ['PGPASSWORD']
db_host = os.environ['POSTGRES_HOST']
db_port = os.environ['POSTGRES_PORT']
#frequency = 18000 if os.environ['BACKUP_INTERVAL'] is None else os.environ['BACKUP_INTERVAL']
#/mnt/data/db
KEEP_ALL = 0
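# KEEP_ALL = 0: delete the previous dump whenever filecmp reports it identical
# to the new one (see purgeBackup below); set non-zero to keep every dump.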
def purgeBackup(old_file,new_file):
    if filecmp.cmp(old_file, new_file):
os.remove(old_file)
def loadBackup(backup_file):
ps = subprocess.Popen(
['psql', '-h', db_host, '-U', db_user, '-d', db_name, '-f', backup_file],
stdout=subprocess.PIPE
)
output = ps.communicate()[0]
for line in output.splitlines():
print(line)
def getFileInfo(path):
result={}
result['path'] = path
# file modification timestamp of a file
m_time = os.path.getmtime(path)
# file creation timestamp in float
c_time = os.path.getctime(path)
# convert timestamp into DateTime object
result['modifiedOn'] = datetime.fromtimestamp(m_time)
# convert creation timestamp into DateTime object
result['createdOn'] = datetime.fromtimestamp(c_time)
return result
#get last backup
data = [getFileInfo('/mnt/data/db/{}'.format(item)) for item in os.listdir("/mnt/data/db") if item.endswith(".sql")]
sorted_data=sorted(data, key=lambda i: i['modifiedOn'],reverse=True)
last_backup = sorted_data[0]['path']
print(f"Last backup: {last_backup}")
# Connect to the database
db_string = 'postgres://{}:{}@{}:{}/{}'.format(db_user, db_pass, db_host, db_port, db_name)
print(db_string)
db = psycopg2.connect(db_string)
cursor = db.cursor()
cursor.execute("SELECT version();")
version = cursor.fetchone()
print(version)
cursor.execute("SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public'")
count = cursor.fetchone()
if len(count) > 0 and count[0] == 0:
print("*** RECREATE DB from Backup! ****")
print('loading {}'.format(last_backup))
loadBackup(last_backup)
else:
print("continue....")
db.close()
while True:
tz = timezone('EST')
x = datetime.now(tz)
new_backup=f'/mnt/data/db/postgres-backup-{x.strftime("%m%d%y%H%M%S")}.sql'
print('Backing up %s database to %s' % (db_name, new_backup))
ps = subprocess.Popen(
['pg_dump', '-h', db_host, '-U', db_user, '-d', db_name, '-f', new_backup],
stdout=subprocess.PIPE)
output = ps.communicate()[0]
for line in output.splitlines():
print(line)
if KEEP_ALL == 0:
purgeBackup(last_backup,new_backup)
last_backup = new_backup
sleep(3600)
|
cjrisua/vinomio-api
|
docker/vinomioHC/app.py
|
app.py
|
py
| 2,723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "filecmp.cmp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.getmtime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.getctime",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 91,
"usage_type": "call"
}
] |
17944337782
|
import json
from django.views.generic import DetailView, ListView, View, CreateView
from django.core.exceptions import ImproperlyConfigured
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseRedirect
)
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from .models import Suggestion, SuggestionCopy
from .forms import AddSuggestionForm
class IndexView(DetailView):
"""
The landing page for the site
"""
model = Suggestion
template_name = 'suggestions/index.html'
def get_context_data(self, **kw):
ctx = super(IndexView, self).get_context_data(**kw)
ctx['recent_suggestions'] = SuggestionCopy.objects.all()[:5]
return ctx
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
try:
return queryset.order_by('?')[0]
except IndexError:
raise ImproperlyConfigured('No suggestions are installed')
def render_to_response(self, ctx, **kw):
if 'format' in self.request.GET:
if self.request.GET['format'] == 'json':
return HttpResponse(
json.dumps(
{
'id': ctx['object'].id,
'suggestion': str(ctx['object']),
'url': ctx['object'].get_absolute_url(),
'split_text': ctx['object'].split()
}
),
content_type='application/json'
)
return HttpResponseBadRequest('Format not supported')
return super(IndexView, self).render_to_response(ctx, **kw)
class SuggestionView(DetailView):
"""
A view for a single Suggestion
"""
model = Suggestion
class LoginRequiredMixin(object):
"""
Mixin to ensure a user is logged in; basically applies the login_required
    decorator from the auth module.
"""
@method_decorator(login_required)
def dispatch(self, *ar, **kw):
return super(LoginRequiredMixin, self).dispatch(*ar, **kw)
class GetSuggestionCopyQSMixin(object):
"""
We want to get the 10 latest suggestions that the user has had copied
or if there are none we create a new one.
"""
def get_queryset(self):
queryset = self.request.user.suggestions.all()[:10]
if not queryset.count():
SuggestionCopy.objects.create_random_for_user(self.request.user)
return self.request.user.suggestions.all()[:10]
return queryset
class UserView(LoginRequiredMixin, GetSuggestionCopyQSMixin, ListView):
"""
The logged in user's view
"""
template_name = 'suggestions/user.html'
class JSONResponseMixin(object):
def render_to_response(self, ctx, **kw):
return HttpResponse(json.dumps(ctx), 'application/json')
class SkipSuggestionView(
LoginRequiredMixin,
GetSuggestionCopyQSMixin,
JSONResponseMixin,
View
):
"""
Skip over the current suggestion for the user and return a new suggestion
"""
def get(self, request, *ar, **kw):
self.get_queryset()[0].delete()
SuggestionCopy.objects.create_random_for_user(self.request.user)
queryset = self.get_queryset()
return self.render_to_response({'suggestion': queryset[0].data})
class GetSuggestionCopySingleMixin(object):
def get_queryset(self):
queryset = self.request.user.suggestions.all()
if not queryset.count():
SuggestionCopy.objects.create_random_for_user(self.request.user)
return self.request.user.suggestions.all()
return queryset
def get_object(self, id):
return get_object_or_404(self.get_queryset(), pk=id)
class ActionSuggestionView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Mark the current suggestion for the user as actioned and return a new
suggestion
"""
def get(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
obj.suggestion.actioned_by.add(request.user)
suggestion = SuggestionCopy.objects.create_random_for_user(
request.user
)
return self.render_to_response({'suggestion': suggestion.data})
class LikeSuggestionView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Mark a suggestion as liked by the user and return the amount of likes
"""
def get(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
obj.suggestion.liked_by.add(request.user)
return self.render_to_response(
{'likes': obj.suggestion.liked_by.count()}
)
class PutBackView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
    Put a crossed-off suggestion back into the current list by making a copy of it
"""
def get(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
self.get_queryset()[0].delete()
suggestion = SuggestionCopy.objects.create_from_suggestion_for_user(
obj.suggestion,
request.user
)
return self.render_to_response({'suggestion': suggestion.data})
class UpdateTextView(
LoginRequiredMixin,
GetSuggestionCopySingleMixin,
JSONResponseMixin,
View
):
"""
Update the text for "them" in the selected suggestion
"""
def post(self, request, *ar, **kw):
obj = self.get_object(kw['id'])
        if 'text' not in request.POST:
return HttpResponseBadRequest('No text supplied')
obj.them_text = request.POST['text']
obj.save()
return self.render_to_response({'status': 'success'})
class AddSuggestionView(LoginRequiredMixin, CreateView):
"""
Allow a logged in user to add their own suggestion for review that can
be added (by an admin) to the pool of suggestions given on the site
"""
template_name = 'suggestions/add.html'
form_class = AddSuggestionForm
def get_success_url(self):
return reverse('suggestions:add')
def form_valid(self, form):
form.save(self.request.user)
messages.success(self.request, _('Thank you for your suggestion'))
return HttpResponseRedirect(self.get_success_url())
|
rvause/djangodash2013
|
suggestions/views.py
|
views.py
|
py
| 6,614 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.DetailView",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "models.Suggestion",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "models.SuggestionCopy.objects.all",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.SuggestionCopy.objects",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "models.SuggestionCopy",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ImproperlyConfigured",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "models.Suggestion",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.utils.decorators.method_decorator",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "models.SuggestionCopy.objects.create_random_for_user",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.SuggestionCopy.objects",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "models.SuggestionCopy",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "models.SuggestionCopy.objects.create_random_for_user",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "models.SuggestionCopy.objects",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "models.SuggestionCopy",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "models.SuggestionCopy.objects.create_random_for_user",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "models.SuggestionCopy.objects",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "models.SuggestionCopy",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "models.SuggestionCopy.objects.create_random_for_user",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "models.SuggestionCopy.objects",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "models.SuggestionCopy",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "models.SuggestionCopy.objects.create_from_suggestion_for_user",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "models.SuggestionCopy.objects",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "models.SuggestionCopy",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "django.views.generic.CreateView",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "forms.AddSuggestionForm",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 217,
"usage_type": "call"
}
] |
27773506390
|
import os
import asyncio
from telepyrobot.setclient import TelePyroBot
from pyrogram import filters
from pyrogram.types import Message, ChatPermissions
from telepyrobot import COMMAND_HAND_LER
from telepyrobot.utils.admin_check import admin_check
__PLUGIN__ = os.path.basename(__file__.replace(".py", ""))
__help__ = f"""
Commands to help you manage a chat.
`{COMMAND_HAND_LER}leavechat`: Exit from the Group.
Usage: {COMMAND_HAND_LER}leavechat
`{COMMAND_HAND_LER}invitelink`: Gives the invitelink of the Group.
Usage: {COMMAND_HAND_LER}invitelink
`{COMMAND_HAND_LER}setchatpic`: Changes the Picture of Group.
Usage: {COMMAND_HAND_LER}setchatpic (as a reply to the message)
`{COMMAND_HAND_LER}delchatpic`: Removes the Picture of Group.
Usage: {COMMAND_HAND_LER}delchatpic (as a reply to the message)
`{COMMAND_HAND_LER}setchatname`: Renames the Group.
Usage: {COMMAND_HAND_LER}setchatname (chatname or as a reply to the message)
`{COMMAND_HAND_LER}setchatdesc`: Sets the Description of the Group.
Usage: {COMMAND_HAND_LER}setchatdesc (chatdesc or as a reply to the message)
"""
@TelePyroBot.on_message(filters.command("leavechat", COMMAND_HAND_LER) & filters.me)
async def leavechat(c: TelePyroBot, m: Message):
if m.chat.type in ["group", "supergroup"]:
chat_id = m.chat.id
is_admin = await admin_check(c, m)
if not is_admin:
return
await c.leave_chat(chat_id, delete=True)
return
@TelePyroBot.on_message(filters.command("invitelink", COMMAND_HAND_LER) & filters.me)
async def invitelink(c: TelePyroBot, m: Message):
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
link = await c.export_chat_invite_link(chat_id)
await m.edit_text(f"**Link for Chat:**\n`{link}`")
return
@TelePyroBot.on_message(filters.command("setchatpic", COMMAND_HAND_LER) & filters.me)
async def set_picture(c: TelePyroBot, m: Message):
if m.chat.type in ["group", "supergroup"]:
is_admin = await admin_check(c, m)
if not is_admin:
return
await m.edit_text("`Tring to Change Group Picture....`")
chat_id = m.chat.id
try:
if m.reply_to_message and m.reply_to_message.media:
file_id = m.reply_to_message.photo.file_id
file_ref = m.reply_to_message.photo.file_ref
await c.set_chat_photo(chat_id, file_id, file_ref=file_ref)
await m.edit_text(f"`{m.chat.type.title()} picture has been set.`")
else:
await m.edit_text("`Reply to an image to set that as group pic`")
except Exception as ef:
await m.edit_text(f"**Could not Change Chat Pic due to:**\n`{ef}`")
return
@TelePyroBot.on_message(filters.command("delchatpic", COMMAND_HAND_LER) & filters.me)
async def delchatpic(c: TelePyroBot, m: Message):
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
try:
await c.delete_chat_photo(chat_id)
await m.edit_text(f"`Deleted Chat Picture for {m.chat.title}`")
except Exception as ef:
await m.edit_text(f"Error deleting Chat Pic due to:\n`{ef}`")
@TelePyroBot.on_message(filters.command("setchatname", COMMAND_HAND_LER) & filters.me)
async def setchatname(c: TelePyroBot, m: Message):
await m.edit_text("__Trying to Change Chat Name!__")
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
chat_title = m.text.split(None, 1)
if m.reply_to_message:
chat_title = m.reply_to_message.text
else:
chat_title = chat_title[1]
try:
await c.set_chat_title(chat_id, chat_title)
await m.edit_text(f"<b>Changed Chat Name to:</b> <code>{chat_title}</code>")
except Exception as ef:
await m.edit_text(f"**Could not Change Chat Title due to:**\n`{ef}`")
@TelePyroBot.on_message(filters.command("setchatdesc", COMMAND_HAND_LER) & filters.me)
async def setchatdesc(c: TelePyroBot, m: Message):
await m.edit_text("__Trying to Change Chat Desciption!__")
is_admin = await admin_check(c, m)
if not is_admin:
return
chat_id = m.chat.id
chat_desc = m.text.split(None, 1)
if m.reply_to_message:
chat_desc = m.reply_to_message.text
else:
chat_desc = chat_desc[1]
try:
await c.set_chat_description(chat_id, chat_desc)
await m.edit_text(
f"<b>Changed Chat Description to:</b> <code>{chat_desc}</code>"
)
except Exception as ef:
await m.edit_text(f"**Could not Change Chat Desciption due to:**\n`{ef}`")
|
Divkix/TelePyroBot
|
telepyrobot/plugins/chat.py
|
chat.py
|
py
| 4,652 |
python
|
en
|
code
| 40 |
github-code
|
6
|
[
{
"api_name": "os.path.basename",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.admin_check.admin_check",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.admin_check.admin_check",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.admin_check.admin_check",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 56,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.admin_check.admin_check",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 77,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.admin_check.admin_check",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.admin_check.admin_check",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 109,
"usage_type": "attribute"
}
] |
12100194486
|
import unittest
import itertools
from functools import partial
from bst import BST
def _factory(l):
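    # Build a BST from a list by inserting items in order; l[0] becomes the root.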
_b = BST(l[0])
for item in l[1:]:
_b.insert(item)
return _b
class TestBST(unittest.TestCase):
def _check_node(self, node, item, left_child, right_child):
self.assertEqual(item, node.item)
if left_child is None:
self.assertIsNone(node.left)
else:
self.assertEqual(left_child, node.left.item)
if right_child is None:
self.assertIsNone(node.right)
else:
self.assertEqual(right_child, node.right.item)
def test_sanity(self):
# Act
b = BST(1)
# Assert
self.assertEqual(1, b.item)
self.assertEqual(None, b.left)
self.assertEqual(None, b.right)
def test_insert_1(self):
# Arrange + Act
b = BST(1)
b.insert(2)
b.insert(3)
# Assert
self._check_node(b, 1, None, 2)
self._check_node(b.right, 2, None, 3)
self._check_node(b.right.right, 3, None, None)
def test_insert_2(self):
# Arrange + Act
b = BST(1)
b.insert(3)
b.insert(2)
# Assert
self._check_node(b, 1, None, 3)
self._check_node(b.right, 3, 2, None)
self._check_node(b.right.left, 2, None, None)
def test_insert_3(self):
# Arrange + Act
b = BST(2)
b.insert(1)
b.insert(3)
# Assert
self._check_node(b, 2, 1, 3)
self._check_node(b.left, 1, None, None)
self._check_node(b.right, 3, None, None)
def test_insert_4(self):
# Arrange + Act
b = BST(2)
b.insert(3)
b.insert(1)
# Assert
self._check_node(b, 2, 1, 3)
self._check_node(b.left, 1, None, None)
self._check_node(b.right, 3, None, None)
def test_insert_5(self):
# Arrange + Act
b = BST(3)
b.insert(1)
b.insert(2)
# Assert
self._check_node(b, 3, 1, None)
self._check_node(b.left, 1, None, 2)
self._check_node(b.left.right, 2, None, None)
def test_insert_6(self):
# Arrange + Act
b = BST(3)
b.insert(2)
b.insert(1)
# Assert
self._check_node(b, 3, 2, None)
self._check_node(b.left, 2, 1, None)
self._check_node(b.left.left, 1, None, None)
# Now that we tested insert(), we can use _factory!
def test__eq__when_equal(self):
# Arrange
b1 = _factory([2,3,1])
b2 = _factory([2,1,3])
#Assert
self.assertEqual(b1, b2)
def test__eq__when_not_equal(self):
# Arrange
l1 = _factory([1,2,3])
l2 = _factory([1,3,2])
#Assert
self.assertFalse(l1 == l2)
def test_search_contains_return_list(self):
# Arrange
b = _factory([1,3,2,4])
expected = _factory([3,4,2])
# Act
actual = b.search(3)
# Assert
self.assertEqual(expected, actual)
def test_search_not_contains_return_None(self):
# Arrange
b = _factory([1,2,3,4,5])
# Act + Assert
self.assertIsNone(b.search(6))
def test_min_max(self):
for perm in itertools.permutations(range(5)):
self.assertEqual(0, _factory(perm).min().item)
self.assertEqual(4, _factory(perm).max().item)
def test_inorder(self):
b = _factory([2,1,3])
self.assertEqual([1,2,3], b.inorder())
def test_preorder(self):
b = _factory([2,1,3])
self.assertEqual([2,1,3], b.preorder())
    def test_postorder(self):
b = _factory([2,1,3])
self.assertEqual([1,3,2], b.postorder())
def test_search_parent(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([4,3,6,5]), b._search_parent(6))
self.assertEqual(_factory([4,3,6,5]), b._search_parent(3))
def test_delete_delete_childess_node(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([2,1,7,8,4,6,5]), b.delete(3))
def test_delete_delete_node_with_one_child_left(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([2,1,7,8,4,3,5]), b.delete(6))
def test_delete_delete_node_with_one_child_right(self):
b = _factory([2,1,7,8,4,3,6,5,9])
self.assertEqual(_factory([2,1,7,4,3,6,5,9]), b.delete(8))
def test_delete_delete_node_with_two_children(self):
b = _factory([2,1,7,8,4,3,6,5])
self.assertEqual(_factory([2,1,7,8,5,3,6]), b.delete(4))
def test_delete_delete_node_with_two_children_min_with_right_subtree(self):
b = _factory([2,1,7,8,4,3,6,5,5.8,5.7,5.9])
self.assertEqual(_factory([2,1,7,8,5,3,6,5.8,5.7,5.9]), b.delete(4))
def test__repr__(self):
# Arrange
b = _factory([1,3,2])
# Act + Assert
self.assertEqual('BST(item=1, left=None, right=BST(item=3, left=BST(item=2, left=None, right=None), right=None))', b.__repr__())
if __name__ == '__main__':
unittest.main()
|
Shaywei/MyDevTools
|
Python/BasicDataStructures/bst_tests.py
|
bst_tests.py
|
py
| 5,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bst.BST",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "bst.BST",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bst.BST",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "bst.BST",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "bst.BST",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "bst.BST",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "bst.BST",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "bst.BST",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 190,
"usage_type": "call"
}
] |
40201462407
|
import cv2
import numpy as np
import pandas as pd
import json
from scipy.spatial.distance import cdist
import os
# Get fps of given video
def getFps(path):
vidObj = cv2.VideoCapture(path)
fps = vidObj.get(cv2.CAP_PROP_FPS)
print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
vidObj.release()
return int(fps)
# Given the path to the video, its fps, and the second of the needed frame, it saves that frame as a jpg (used for testing and trials)
def saveFrame(path, fps, frame_no):
# Path to video file
vidObj = cv2.VideoCapture(path)
# Used as counter variable
count = 0
success = 1
frame_count = 0
while success:
success, img = vidObj.read()
if frame_count == frame_no:
cv2.imwrite("frame"+str(frame_count)+".jpg",img)
break
# Catch the frames per second
if count % fps == 0:
frame_count = frame_count + 1
count += 1
# Path: Path to the video to capture descriptors
# Fps: Fps of the video
# Interval: Array with two elements that indicate the start and end time of video to capture ([0,420] for first 7 min)
# No_of_descriptors: SIFT captures many descriptors most of which are unnecessary. This parameter determines the number of descriptors to capture with biggest blobs.
# Can be reduced to some extent with efficiency concerns.
# Folder_to_save: Descriptors are saved to a subfolder under ./descriptors. Name of the subfolder should be given.
# Function saves 3 files:
# * address.json: Mapping of descriptors to frames ({"352":2} means descriptor in 352. row is the first descriptor of frame 2)
# * descriptors.npy: A 2d numpy array where each row is a descriptor (which is a 128 byte array). Each frame has no_of_descriptors rows in this array.
# * angles.npy: A 2d array that keeps principle angle of each keypoint in a frame in each row.
# (Each row has no_of_descriptors elements since there are no_of_descriptors keypoints for each frame. And there are as many rows as the number of frames captured.)
# Ex. interval = [20,40] and no_of_descriptors = 150
# Then the frames between 20. and 40. seconds of the given video are analyzed.
# descriptors.npy will have the shape (150*20, 128) since each row is a descriptor and total number of descriptors is 150*20
# angles.npy will have the shape (20,150) since each row is a frame and each descriptor is a column
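# A minimal usage sketch (hypothetical path; adjust fps/interval to your video):
#
#     fps = getFps("./videos/sample.mov")
#     captureDescriptors("./videos/sample.mov", fps, interval=[0, 20],
#                        folder_to_save="sample", no_of_descriptors=150)
#
# With interval=[0, 20] and 150 descriptors per frame this writes
# descriptors.npy of shape (150*20, 128), angles.npy with 20 rows, and
# address.json mapping row offsets 0, 150, 300, ... to frame numbers.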
def captureDescriptors(path, fps, interval, folder_to_save, no_of_descriptors=150):
# Path to video file
vidObj = cv2.VideoCapture(path)
# Used as counter variable
count = 0
success = 1
start = interval[0]
end = interval[1]
detect = cv2.xfeatures2d.SIFT_create(no_of_descriptors)
all_desc = None
all_angles =[]
for i in range(start):
all_angles.append([])
first = True
rowcount = 0
frame_address = {} # the mapping from row of decriptors to the frame number
frame_count = start # we catch the frame by second
while success:
if (count / fps) >= end:
break
success, img = vidObj.read()
if (count / fps) < start:
count += 1
continue
# Catch the frames per second
if count % fps == 0:
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
keypoints, descriptors = detect.detectAndCompute(img,None)
angles = [int(key.angle) for key in keypoints]
all_angles.append(angles)
if first:
all_desc = descriptors
first = False
else:
all_desc = np.concatenate((all_desc, descriptors))
frame_address[rowcount] = frame_count
rowcount = rowcount + len(descriptors)
frame_count = frame_count + 1
count += 1
if not os.path.exists("./descriptors/"+folder_to_save):
os.mkdir("./descriptors/"+folder_to_save)
np.save("./descriptors/"+folder_to_save+"/angles", all_angles)
np.save("./descriptors/"+folder_to_save+"/descriptors", all_desc)
with open('./descriptors/'+folder_to_save+'/address.json', 'w') as fp:
json.dump(frame_address, fp)
print("Features saved")
# Path: Path to the video to analyze
# Fps: Fps of the video
# Interval: Array with two elements that indicate the start and end time of video to analyze ([420,840] between 7. and 14. mins)
# No_of_descriptors: SIFT captures many descriptors most of which are unnecessary. This parameter determines the number of descriptors to capture with biggest blobs
# Desc: descriptors.npy which is obtained by captureDescriptors()
# Sq: address.json which is obtained by captureDescriptors()
# Ang: angles.npy which is obtained by captureDescriptors()
# Ratio: When a descriptor is compared to a set of descriptors, we call the most similar pair a "match".
# To call it a "good match", we need that the distance of the match must me smaller than a ratio of the second best match.
# If ratio = 0.7, distances of first two matches are d1 and d2, the match with distance of d1 is a good match if d1 < 0.7*d2.
# We only count the good matches, thus ratio is an important parameter.
# Dumpfile: The file to write the matching results. (need to be a .csv)
# Function reads the given interval of the video, extracts the SIFT features of each frame, then compares the features with the ones in database.
# For our case, the database is given with desc, sq, ang. This can be changed. With the comparison, match results are written to a .csv file.
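# A matching usage sketch (hypothetical paths; assumes features were saved
# under ./descriptors/sample by captureDescriptors as above):
#
#     with open('./descriptors/sample/address.json') as fp:
#         sq = json.load(fp)
#     desc = np.load('./descriptors/sample/descriptors.npy')
#     ang = np.load('./descriptors/sample/angles.npy', allow_pickle=True)
#     analyzeFrames("./videos/sample.mov", [20, 40], desc, sq, ang,
#                   no_of_descriptors=150, fps=30, dumpfile='./matches/sample.csv')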
def analyzeFrames(path, interval, desc, sq, ang, no_of_descriptors, fps, dumpfile, ratio = 0.75):
# Path to video file
vidObj = cv2.VideoCapture(path)
# Used as counter variable
count = 0
success = 1
start = interval[0]
end = interval[1]
detect = cv2.xfeatures2d.SIFT_create(no_of_descriptors)
first = True
while success:
if (count / fps) >= end:
break
success, img = vidObj.read()
if (count / fps) < start:
count += 1
continue
# Catch the frames per second
if count % fps == 0:
frame_no = int(count/fps)
print(frame_no)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
new_keypoints, new_descriptors = detect.detectAndCompute(img,None)
angles = [int(key.angle) for key in new_keypoints]
d = np.array(cdist(new_descriptors, desc))
matches, matched, glob_match = getMatchFromDistance(sq, d, ratio)
startidx = 0
for key, value in sq.items():
if value == matched:
startidx = int(key)
break
matched_ang1 = []
matched_ang2 = []
for m in glob_match:
new_idx = m[0]
old_idx = m[1]
if old_idx>=startidx and old_idx <startidx + no_of_descriptors:
idx = old_idx - startidx
angle1 = angles[new_idx]
angle2 = ang[matched][idx]
matched_ang1.append(angle1)
matched_ang2.append(angle2)
angle, _ = detectAngle(matched_ang1, matched_ang2)
writeMatches(frame_no, len(sq), matches, matched, angle, first, dumpfile)
if first:
first = False
count += 1
# d: The distance matrix between descriptors of a frame and the set of descriptors in the database.
# Shape of d is (n,m) if current frame has n descriptors and there are m descriptors in database.
# d_ij = Distance between the ith descriptor of the frame and jth descriptor in the database.
# Function returns 3 things:
# * matches: An array that counts the number of matches between the current frame and each of the frames in database.
# * matched: argmax(matches) , the frame that is the best match of the current frame (test frame)
# * glob_match: An array of tuples where each element (i,j) is a pair of indices of matched descriptors.
# (i,j) means that ith descriptor of test frame is matched with jth descriptor in database. We get this to find relative angles.
def getMatchFromDistance(sq, d, ratio):
rows, _ = d.shape
matches = [0 for _ in range(len(sq))]
indices = []
glob_match = []
for i in range(rows):
row = d[i]
min1, min2 = np.partition(row, 1)[0:2]
if min1 < ratio*min2:
# means this is a good match
idx = np.where(row == min1)[0][0]
indices.append(idx)
glob_match.append((i,idx))
    for idx in indices:
        last = '0'
        for k in sq:
            if idx >= int(k):
                last = k
                continue
            else:
                matched_square = sq[last]
                matches[matched_square] += 1
                break
        else:
            # for/else: idx fell past the last key, so it counts for the final frame
            matches[sq[last]] += 1
matched = np.argmax(matches)
return matches, matched, glob_match
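# Worked example of the ratio test with toy numbers: if a row of d has its two
# smallest distances min1=5.0 and min2=9.0, then with ratio=0.75 we test
# 5.0 < 0.75*9.0 = 6.75, which holds, so the pair counts as a good match;
# with min1=8.0 it would be rejected as ambiguous.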
# http://amroamroamro.github.io/mexopencv/matlab/cv.SIFT.detectAndCompute.html
# Gets two arrays of angles to compare. Arrays have one to one correspondence. That is, ith elements of both arrays belong to matched keypoints.
# Difference between each corresponding pair of angles is calculated.
# The most common difference is inferred to be the relative angle between test frame and matched database frame.
def detectAngle(angles1, angles2):
counter = np.array([0 for i in range(360) ])
for i in range(len(angles1)):
diff = angles1[i] - angles2[i]
if diff < 0:
diff += 360
counter[diff] += 1
return np.argmax(counter), np.max(counter)
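# Worked example: angles1=[100, 200, 31] and angles2=[90, 190, 20] give
# differences [10, 10, 11], so detectAngle returns (10, 2) -- a relative
# rotation of 10 degrees supported by two keypoint pairs.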
# Matching results are written to a csv file.
def writeMatches(frame_no, no_of_frames, matches, matched, angle, first, dumpfile):
if not os.path.exists("./matches"):
os.mkdir("./matches")
total_matches = sum(matches)
max_match = matches[matched]
if not first:
df = pd.read_pickle(dumpfile)
else:
columns = ["Frame no","Matched Frame", "Angle" ,"Total Matches", "Max Match"]
for i in range(no_of_frames):
columns.append(i)
df = pd.DataFrame(columns=columns)
dic = {"Frame no": [frame_no], "Matched Frame": [matched], "Angle":[angle], "Total Matches":[total_matches], "Max Match":[max_match]}
for i in range(no_of_frames):
dic[i] = [matches[i]]
df2 = pd.DataFrame(dic, index=[0])
df = pd.concat([df, df2], sort=False)
df.to_pickle(dumpfile)
# folder name of the run, will appear under matches directory
folder = "whitesquares"
# parameters of captureDescriptors()
train_video = "./videos/karolar_2.mov"
train_fps = 30
train_interval = [0,430]
train_descriptors = 150
# parameters of analyzeFrames()
query_video = "./videos/karolar_2.mov"
query_fps = 30
query_interval = [430,1320]
query_descriptors = 150
ratio = 0.75
# make it false if the descriptors in the database are being used
train = True
test = False
if train:
captureDescriptors(path = train_video,fps = train_fps, interval = train_interval, folder_to_save = folder, no_of_descriptors = train_descriptors)
if test:
with open('./descriptors/'+folder+'/address.json', 'r') as fp:
sq = json.load(fp)
with open('./descriptors/'+folder+'/descriptors.npy', 'rb') as f:
desc = np.load(f)
with open('./descriptors/'+folder+'/angles.npy', 'rb') as f:
ang = np.load(f,allow_pickle=True)
    analyzeFrames(path = query_video, interval = query_interval, desc = desc, sq = sq, ang = ang, no_of_descriptors = query_descriptors,
                  fps = query_fps, dumpfile = './matches/'+folder+'.csv', ratio = ratio)
df = pd.read_pickle("./matches/"+folder+".csv")
df.to_csv("./matches/"+folder+".csv")
|
orhungorkem/SIFTDetector
|
main.py
|
main.py
|
py
| 11,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d.SIFT_create",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d.SIFT_create",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.cdist",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.partition",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 333,
"usage_type": "call"
}
] |
6114242445
|
import argparse
import gym
import numpy as np
from itertools import count
from collections import namedtuple
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from tensorboardX import SummaryWriter
from hebbian_learning.models.equilibrium_propagation_value import Equilibrium_Propagation_Value_Network
from hebbian_learning.models.qt_opt_equil_prop import Qt_Opt_Equil_Prop
from hebbian_learning.models.mlp import MLP
from hebbian_learning.models.qt_opt import Qt_Opt
parser = argparse.ArgumentParser(description='PyTorch RL Example')
parser.add_argument('--equil_prop', type=bool, default=True)
parser.add_argument('--seed', type=int, default=1337)
parser.add_argument('--render', type=bool, default=True)
parser.add_argument('--log-interval', type=int, default=1)
# Equil Prop
parser.add_argument('--energy_learn_rate', type=float, default=0.1)
parser.add_argument('--learning_rate', type=float, default=0.01)
parser.add_argument('--epsilon', type=float, default=0.9)
parser.add_argument('--gamma', type=float, default=0.99)
# parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--target_replace_period', type=int, default=10)
# parser.add_argument('--memory_capacity', type=int, default=256)
parser.add_argument('--num_hidden', type=int, default=64)
parser.add_argument('--n_iterations', type=int, default=1)
parser.add_argument('--n_iterations_neg', type=int, default=1)
parser.add_argument('--beta', type=float, default=0.5)
# MLP
# parser.add_argument('--learning_rate', type=float, default=0.01)
# parser.add_argument('--gamma', type=float, default=0.99)
# parser.add_argument('--epsilon', type=float, default=0.95)
# parser.add_argument('--batch_size', type=int, default=16)
# parser.add_argument('--target_replace_period', type=int, default=10)
# parser.add_argument('--memory_capacity', type=int, default=256)
# parser.add_argument('--num_hidden', type=int, default=64)
args = parser.parse_args()
# args.beta = -np.log(1-args.beta)
# env = gym.make('MountainCar-v0')
env = gym.make('CartPole-v0')
# env = env.unwrapped
env.seed(args.seed)
torch.manual_seed(args.seed)
writer = SummaryWriter()
N_ACTIONS = 1 # env.action_space.n
N_STATES = env.observation_space.shape[0]
def main():
if args.equil_prop:
network = Equilibrium_Propagation_Value_Network(N_STATES + N_ACTIONS, 1, args.num_hidden,
args.energy_learn_rate, args.learning_rate, args.n_iterations,
args.n_iterations_neg, args.beta)
rl_model = Qt_Opt_Equil_Prop(network, N_STATES, N_ACTIONS, args.target_replace_period, args.epsilon, args.gamma)
else:
network = MLP(N_STATES + N_ACTIONS, 1, args.num_hidden, args.learning_rate)
rl_model = Qt_Opt(network, N_STATES, N_ACTIONS, args.memory_capacity,
args.batch_size, args.target_replace_period, args.epsilon, args.gamma)
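        # Note (sketch): this branch reads args.memory_capacity and
        # args.batch_size, which are commented out in the argparse block
        # above; re-enable those arguments before running with equil_prop=False.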
running_reward = 20
for i_episode in range(100000):
s = env.reset()
ep_r = 0
total_cost = 0
for t in range(100000):
if args.render:
env.render()
a = rl_model.choose_action(s)
s_, r, done, info = env.step(a)
            if not args.equil_prop:
                rl_model.store_transition(s, a, r, done, s_)
                rl_model.learn()
                cost = 0  # Qt_Opt.learn() does not return a per-step cost
else:
cost = rl_model.learn(s, a, r, done, s_)
s = s_
ep_r += r
total_cost += cost
if done:
writer.add_scalar('data/episode_reward', t, i_episode)
writer.add_scalar('data/average_cost', total_cost / t, i_episode)
running_reward = running_reward * 0.99 + ep_r * 0.01
print('Episode {}\treward: {:.2f}\tAverage reward: {:.2f}'.format(
i_episode, ep_r, running_reward))
break
env.close()
if __name__ == '__main__':
main()
|
ltecot/emergence_properties
|
hebbian_learning/envs/cartpole.py
|
cartpole.py
|
py
| 4,095 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "hebbian_learning.models.equilibrium_propagation_value.Equilibrium_Propagation_Value_Network",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "hebbian_learning.models.qt_opt_equil_prop.Qt_Opt_Equil_Prop",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "hebbian_learning.models.mlp.MLP",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "hebbian_learning.models.qt_opt.Qt_Opt",
"line_number": 65,
"usage_type": "call"
}
] |
39672933944
|
import pytest
import common
@pytest.mark.parametrize(
"data,start,end",
[
("0-0", 0, 0),
("11-22", 11, 22),
],
)
def test_parse(data: str, start: int, end: int):
assert common.SectionRange.parse(data) == common.SectionRange(start, end)
@pytest.mark.parametrize(
"range1,range2,res",
[
("1-2", "3-4", False),
("1-2", "2-3", False),
("1-2", "2-2", True),
("1-3", "2-2", True),
("2-2", "2-2", True),
("2-3", "2-2", True),
("2-3", "1-2", False),
("2-3", "1-1", False),
],
)
def test_contains(range1: str, range2: str, res: bool):
sr1 = common.SectionRange.parse(range1)
sr2 = common.SectionRange.parse(range2)
assert sr1.contains(sr2) == res
@pytest.mark.parametrize(
"range1,range2,res",
[
("1-2", "3-4", False),
("1-2", "2-3", True),
("1-2", "2-2", True),
("1-3", "2-2", True),
("2-2", "2-2", True),
("2-3", "2-2", True),
("2-3", "1-2", True),
("2-3", "1-1", False),
],
)
def test_overlaps(range1: str, range2: str, res: bool):
sr1 = common.SectionRange.parse(range1)
sr2 = common.SectionRange.parse(range2)
assert sr1.overlaps(sr2) == res
|
cmatsuoka/aoc
|
2022 - expedition/04 - camp cleanup/test_common.py
|
test_common.py
|
py
| 1,252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "common.SectionRange.parse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "common.SectionRange",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "common.SectionRange.parse",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "common.SectionRange",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "common.SectionRange.parse",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "common.SectionRange",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "common.SectionRange.parse",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "common.SectionRange",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "common.SectionRange.parse",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "common.SectionRange",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 35,
"usage_type": "attribute"
}
] |
71456631549
|
import boto3
from operator import itemgetter
ecr_client = boto3.client('ecr')
repositories = ecr_client.describe_repositories()['repositories']
if len(repositories) == 0:
print("Repository is empty!")
for repo in repositories:
print(f"Repository name: {repo['repositoryName']}")
query_repository_name = "java-maven-app"
images = ecr_client.describe_images(
repositoryName=query_repository_name
)
image_tags = []
for image in images['imageDetails']:
image_tags.append({
'tag': image['imageTags'],
'date': image['imagePushedAt']
})
sorted_images_based_on_date = sorted(image_tags, key=itemgetter("date"), reverse=True)
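# Each entry pairs an image's tag list with its push timestamp, e.g.
# {'tag': ['1.0.3'], 'date': datetime(...)} (illustrative tag); sorting on
# 'date' with reverse=True puts the most recently pushed image first.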
for image in sorted_images_based_on_date:
print(image)
|
ArshaShiri/DevOpsBootcampPythonAutomationAssignment
|
ecr_in_aws.py
|
ecr_in_aws.py
|
py
| 728 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 27,
"usage_type": "call"
}
] |
12691211626
|
import os
from twilio.rest import Client
from urllib.request import urlopen
import re
import time
import smtplib
#need twilio credientials to run
account_sid = os.environ["TWILIO_ACCOUNT_SID"]
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
twilio_number = os.environ["TWILIO_NUMBER"]
ubc_url = "https://courses.students.ubc.ca/cs/main?pname=subjarea&tname=subjareas&req=5&dept="
WAIT_TIME = 20
def sendMessage(message, phonenumber):
client = Client(account_sid, auth_token)
client.messages.create(
to=phonenumber,
from_=twilio_number,
body=message
)
def check_seats(url, user_info, regex_objects):
web_page_text = urlopen(url).read()
htmlText = web_page_text.decode("utf8")
general = re.search(regex_objects["general_seats"], htmlText)
restricted = re.search(regex_objects["restricted_seats"], htmlText)
temp_unavailable = htmlText.find("Note: this section is temp. unavailable")
print("Still looking...")
print("Restricted Seats: ", restricted.group(1))
print("General Seats: ", general.group(1))
if temp_unavailable != -1:
return 3
if not general or not restricted:
print("Something went wrong, maybe you put the wrong url in or lost internet connection, try restarting")
return 0
if general.group(1) != '0':
return 1
if restricted.group(1) != '0':
return 2
else:
return 0
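# Status codes returned by check_seats: 0 = no seats open (or the page could
# not be parsed), 1 = a general seat is open, 2 = a restricted seat is open,
# 3 = the section is temporarily unavailable.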
def compile_regex():
regexs = {}
regexs["general_seats"] = re.compile("<td width='200px'>General Seats Remaining:</td><td align='left'><strong>(.*?)</strong></td>")
regexs["restricted_seats"] = re.compile("<td width='200px'>Restricted Seats Remaining\*:</td><td align='left'><strong>(.*?)</strong></td>")
return regexs
#gathers nessesary user info
def gather_user_info():
user_info = {}
user_info['department'] = input("Enter department:")
user_info["course_number"] = input("Enter course number: ")
user_info["section"] = input("Enter section number: ")
user_info["phone_number"] = input("Enter phone number:(in format +xxxxxxxxxxx) ")
user_info["restricted"] = input("Are restricted seats okay?(yes/no)")
return user_info
def main():
user_info = gather_user_info()
print(user_info['department'])
    defined_url = ubc_url + user_info["department"] + "&course=" + user_info["course_number"] + "&section=" + user_info["section"]
    regex_objects = compile_regex()
while True:
status = check_seats(defined_url, user_info, regex_objects)
if status == 1:
print("GENERAL SEAT AVAILABLE SENDING MESSAGE")
sendMessage('There is a general seat available in ' + user_info["department"] + ' ' + user_info["course_number"] + '! Grab it here: ' + defined_url, user_info["phone_number"])
break
if status == 2:
if user_info["restricted"] == "yes":
print("RESTRICTED SEAT AVAILABLE")
sendMessage('There is a restricted seat available in ' + user_info["department"] + ' ' + user_info["course_number"] + '! Grab it here: ' + defined_url, user_info["phone_number"])
break
if status == 3:
print("The course is temporarily unavailable")
time.sleep(WAIT_TIME)
else:
time.sleep(WAIT_TIME)
if __name__ == "__main__":
main()
|
benkenj/UBCCourseNotifier
|
UBCCourseNotifierMain.py
|
UBCCourseNotifierMain.py
|
py
| 3,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "twilio.rest.Client",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 87,
"usage_type": "call"
}
] |
4732908565
|
import xml.etree.ElementTree as ET
from datetime import datetime
from bs4 import BeautifulSoup
class XMLParser:
def __init__(self, file: str):
self.file = file
self.parameters = {'INPUT':{},
'DISCRIMINATOR':{},
'QDC':{},
'SPECTRA':{},
'REJECTIONS':{},
'ENERGY_CALIBRATION':{},
'SYNC':{},
'HARDWARE_COINCIDENCE':{},
'MISC':{}}
self.groups = list(self.parameters.keys())
self.reformatted = ['SRV_PARAM_CH_POLARITY','SRV_PARAM_CH_BLINE_NSMEAN','HARDWARE_COINCIDENCE','SRV_PARAM_START_MODE','SRV_PARAM_CH_SPECTRUM_NBINS','SRV_PARAM_CH_INDYN','SRV_PARAM_CH_CFD_FRACTION','SRV_PARAM_CH_DISCR_MODE','SRV_PARAM_CH_ENERGY_COARSE_GAIN','SRV_PARAM_TRGOUT_MODE']
self.reformatted_keys = ['polarity', 'baseline', 'coincidence', 'start', 'ebins', 'input_range', 'cfd', 'discriminator', 'coarse_gain', 'trig_out']
self.formatted = 0
self.board_formatted = False
def get_board_properties(self):
root = ET.parse(self.file).getroot()
name = root.find('board/label').text
id = root.find('board/id').text
model = root.find('board/modelName').text
adc_bits = root.find('board/adcBitCount').text
sample_rate = int(root.find('board/sampleTime').text)*10**6
dpp_type = root.find('board/dppType').text
roc = root.find('board/rocFirmware/major').text + '.' + root.find('board/rocFirmware/minor').text + ' build ' + str(hex(int(root.find('board/rocFirmware/build').text))).split('0x')[-1].zfill(4)
amc = root.find('board/amcFirmware/major').text + '.' + root.find('board/amcFirmware/minor').text + ' build ' + str(hex(int(root.find('board/amcFirmware/build').text))).split('0x')[-1].zfill(4)
link = root.find('board/connectionType').text + ' link #' + root.find('board/address').text
status = root.find('board/active').text
if status == 'true':
status = True
if status == 'false':
status = False
return name, id, model, adc_bits, sample_rate, dpp_type, roc, amc, link, status
def reformat(self, list_of_params):
"""
Reformats the values of the XML file.
Args:
            list_of_params (list): Contains `polarity`, `baseline`, `coincidence`, `start`, `ebins`, `input_range`, `cfd`, `discriminator`, `coarse_gain`, `trig_out`.
"""
if 'all' in list_of_params:
list_of_params.clear()
list_of_params = ['polarity', 'baseline', 'coincidence', 'start', 'ebins', 'input_range', 'cfd', 'discriminator', 'coarse_gain', 'trig_out']
#Formatting of the text values
if 'polarity' in list_of_params:
pol = self.parameters['INPUT']['SRV_PARAM_CH_POLARITY']
real_pol = pol.split('_')[-1]
self.parameters['INPUT']['SRV_PARAM_CH_POLARITY'] = real_pol
if 'baseline' in list_of_params:
bline = self.parameters['INPUT']['SRV_PARAM_CH_BLINE_NSMEAN']
real_bline = bline.split('_')[-1]
self.parameters['INPUT']['SRV_PARAM_CH_BLINE_NSMEAN'] = real_bline
if 'coincidence' in list_of_params:
coinc_mode = self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_MODE']
real_coinc_mode = coinc_mode[11:]
self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_MODE'] = real_coinc_mode
if 'start' in list_of_params:
start_mode = self.parameters['SYNC']['SRV_PARAM_START_MODE']
real_start_mode = start_mode[11:]
self.parameters['SYNC']['SRV_PARAM_START_MODE'] = real_start_mode
if 'ebins' in list_of_params:
energy_bins = self.parameters['SPECTRA']['SRV_PARAM_CH_SPECTRUM_NBINS']
real_energy_bins = energy_bins[5:] +'.0'
self.parameters['SPECTRA']['SRV_PARAM_CH_SPECTRUM_NBINS'] = real_energy_bins
if 'input_range' in list_of_params:
input_range = self.parameters['INPUT']['SRV_PARAM_CH_INDYN']
input_range = input_range.split('_')[1:]
real_input_range = input_range[0] + '.' + input_range[1]
self.parameters['INPUT']['SRV_PARAM_CH_INDYN'] = real_input_range
if 'cfd' in list_of_params:
cfd_frac = self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_FRACTION']
real_cfd_frac = cfd_frac.split('_')[-1]
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_FRACTION'] = real_cfd_frac
if 'discriminator' in list_of_params:
disc_mode = self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_DISCR_MODE']
disc_mode = disc_mode.split('_')[-1]
if disc_mode == "LED":
real_disc_mode = "Leading Edge Discriminator"
if disc_mode == "CFD":
real_disc_mode = "Constant Fraction Discriminator"
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_DISCR_MODE'] = real_disc_mode
if 'coarse_gain' in list_of_params:
coarse_gain = self.parameters['QDC']['SRV_PARAM_CH_ENERGY_COARSE_GAIN']
self.parameters['QDC']['SRV_PARAM_CH_ENERGY_COARSE_GAIN'] = coarse_gain.split('_')[1]
if 'trig_out' in list_of_params:
trig_out = self.parameters['SYNC']['SRV_PARAM_TRGOUT_MODE'].split('_')[2:]
real_trig_out =""
for elem in trig_out:
real_trig_out += elem + ' '
self.parameters['SYNC']['SRV_PARAM_TRGOUT_MODE'] = real_trig_out
        # Convert values given in nanoseconds to seconds (for spinmob autoscaling)
if self.formatted == 0:
self.parameters['INPUT']['SRV_PARAM_RECLEN'] = float(self.parameters['INPUT']['SRV_PARAM_RECLEN'])*10**(-9)
self.parameters['INPUT']['SRV_PARAM_CH_PRETRG'] = float(self.parameters['INPUT']['SRV_PARAM_CH_PRETRG'])*10**(-9)
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_TRG_HOLDOFF'] = float(self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_TRG_HOLDOFF'])*10**(-9)
self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_DELAY'] = float(self.parameters['DISCRIMINATOR']['SRV_PARAM_CH_CFD_DELAY'])*10**(-9)
self.parameters['QDC']['SRV_PARAM_CH_GATE'] = float(self.parameters['QDC']['SRV_PARAM_CH_GATE'])*10**(-9)
self.parameters['QDC']['SRV_PARAM_CH_GATESHORT'] = float(self.parameters['QDC']['SRV_PARAM_CH_GATESHORT'])*10**(-9)
self.parameters['QDC']['SRV_PARAM_CH_GATEPRE'] = float(self.parameters['QDC']['SRV_PARAM_CH_GATEPRE'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T0'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T0'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T1'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DISTRIBUTION_CH_T1'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T0'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T0'])*10**(-9)
self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T1'] = float(self.parameters['SPECTRA']['SW_PARAMETER_TIME_DIFFERENCE_CH_T1'])*10**(-9)
self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_TRGOUT'] = float(self.parameters['HARDWARE_COINCIDENCE']['SRV_PARAM_COINC_TRGOUT'])*10**(-9)
self.parameters['REJECTIONS']['SW_PARAMETER_CH_ENERGYCUTENABLE'] = (True if self.parameters['REJECTIONS']['SW_PARAMETER_CH_ENERGYCUTENABLE'] == 'true' else False)
self.formatted += 1
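    # Example (sketch): after get_parameters() has filled self.parameters,
    # reformat(['polarity', 'cfd']) rewrites just those two entries, while
    # reformat(['all']) rewrites every supported key; the formatted counter
    # ensures the ns-to-s scaling above runs only once.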
def get_parameters(self):
"""
Gets the board parameters (shared parameters for all channels).
"""
root = ET.parse(self.file).getroot()
board_parameters = root.find('board/parameters')
for entry in board_parameters:
key = entry.find('key').text
value = entry.find('value/value').text
group = entry.find('value/descriptor/group').text
if value == 'true':
value = True
if value == 'false':
value = False
for tab in self.groups:
if group == tab:
#if units is None:
self.parameters[tab][key] = value
#else:
#self.parameters[tab][key] = [value, units]
self.formatted = 0
self.reformat(['all'])
return self.parameters
def get_chn_parameters(self, chn_number: str):
root = ET.parse(self.file).getroot()
channels = root.findall('board/channel')
channel_in_use = channels[chn_number]
# keys = channel_in_use.findall('values/entry/key')
# values = channel_in_use.findall('values/entry/value')
entries = channel_in_use.findall('values/entry')
entries_with_vals = []
for index, entry in enumerate(entries):
if entry.find('value') is not None:
entries_with_vals.append(entries[index])
keys = []
values = []
for entry in entries_with_vals:
keys.append(entry.find('key'))
values.append(entry.find('value'))
list_format = []
for key in keys:
if key.text in self.reformatted:
list_format.append(self.reformatted_keys[self.reformatted.index(key.text)])
for group in self.parameters:
for index, key in enumerate(keys):
if key.text in self.parameters[group]:
                    if values[index].text in ('true', 'false'):
                        # store booleans as real bools instead of dropping them
                        self.parameters[group][key.text] = (values[index].text == 'true')
                    else:
                        self.parameters[group][key.text] = values[index].text
# self.formatted = 0
# self.reformat(list_format)
return self.parameters
def get_ch_label(self, chn_number: str):
root = ET.parse(self.file).getroot()
channels = root.findall('board/channel')
channel_to_check = channels[chn_number]
index = channel_to_check.find('index').text
entries = channel_to_check.findall('values/entry')
for entry in entries:
if entry.find('key').text == "SW_PARAMETER_CH_LABEL":
if entry.find('value') is not None:
label = entry.find('value').text
break
        else:  # for/else: runs only if the loop above finishes without hitting the break
            label = "CH"  # default label when no SW_PARAMETER_CH_LABEL entry carries a value
return (index, label)
class InfoParser:
def __init__(self, file: str):
self.file = file
def get_run_info(self):
with open(self.file) as f:
informations = f.readlines()[0:4]
self.id = informations[0].split('=')[-1][:-1]
self.time_start = datetime.strptime(informations[1].split('=')[-1].split('.')[0], "%Y/%m/%d %H:%M:%S")
self.time_stop = datetime.strptime(informations[2].split('=')[-1].split('.')[0], "%Y/%m/%d %H:%M:%S")
self.time_real = self.time_stop - self.time_start
return self.id, self.time_start, self.time_stop, self.time_real
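# Sketch of the expected info file: the first four lines are key=value pairs
# (run id, start time, stop time, ...) whose timestamps look like
# 2023/01/05 12:00:00.000 -- the fractional part is stripped before parsing
# with the "%Y/%m/%d %H:%M:%S" format above. (Illustrative values only.)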
if __name__ == '__main__':
file = "C:\\Users\\chloe\\OneDrive - McGill University\\Coincidence Testing\\Co60 Spectrums with different settings\\DAQ\\4096Chns-20lsb(LE)-80Gain-(300.80.50)-150s\\settings.xml"
test = XMLParser(file)
test.get_parameters()
print(test.get_ch_label(2))
|
Chujo58/ReadROOT
|
XML_Parser.py
|
XML_Parser.py
|
py
| 11,752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 213,
"usage_type": "name"
}
] |
3281849942
|
import torch
import wandb
from torch import nn
import torchvision.utils as vutils
def get_time_emb(dim, time):
pos = torch.arange(0, time, dtype=torch.float)
omega = torch.arange(dim // 2, dtype=torch.float)
omega /= dim / 2.0
omega = 1.0 / 10000 ** omega
out = torch.einsum("m,d->md", pos, omega)
emb_sin = torch.sin(out)
emb_cos = torch.cos(out)
emb = torch.concatenate([emb_sin, emb_cos], dim=1)
return emb
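# Shape note: get_time_emb(dim, time) returns a (time, dim) tensor of fixed
# sinusoidal position codes, e.g. get_time_emb(128, 10).shape == (10, 128);
# each row encodes one timestep and is added to tokens of size slot_size below.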
class SlotBert(nn.Module):
def __init__(self, slate, num_actions, time,
n_heads=4, dim_feedforward=512, num_layers=4, detach=False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.detach = detach
self.slate = slate
self.action_emb = nn.Embedding(num_actions, slate.slot_size)
self.rew_emb = nn.Linear(1, slate.slot_size)
self.modality_mask_emb = nn.Embedding(4, slate.slot_size)
self.time_emb = get_time_emb(slate.slot_size, time)
bert_layer = nn.TransformerEncoderLayer(slate.slot_size, n_heads, dim_feedforward, batch_first=True)
self.bert = nn.TransformerEncoder(bert_layer, num_layers)
self.act_ff = nn.Sequential(nn.Linear(slate.slot_size, slate.slot_size*2),
nn.GELU(),
nn.Linear(slate.slot_size*2, num_actions),
)
self.act_loss = torch.nn.CrossEntropyLoss()
@property
def device(self):
return next(self.parameters()).device
def embed_sep(self, obses, actions, rew, return_add=False):
target_sizes = list(obses.shape)[:2]
obses = torch.flatten(obses, 0, 1)
recon, ce, mse, attns, obses = self.slate(obses)
if self.detach:
obses = obses.detach()
obses_ret = torch.unflatten(obses, 0, target_sizes)
if return_add:
return obses_ret, self.action_emb(actions), self.rew_emb(rew.unsqueeze(-1)), mse, ce,\
torch.unflatten(recon, 0, target_sizes), torch.unflatten(attns, 0, target_sizes)
return obses_ret, self.action_emb(actions), self.rew_emb(rew.unsqueeze(-1)), mse, ce
def mask_sep(self, obses, actions, rew):
        # gamma is a per-sample keep probability; Bernoulli mask value 1 keeps
        # the token, 0 replaces it with the learned mask embedding (index 0)
gamma = torch.rand((obses.shape[0]), device=self.device)
gamma = gamma.unsqueeze(-1)
mask_probs_obses = torch.ones(obses.shape[:-1], device=self.device) * gamma.unsqueeze(-1)
mask_probs_actions_rew = torch.ones(actions.shape[:-1], device=self.device) * gamma
mask_obses = torch.bernoulli(mask_probs_obses).long()
mask_actions = torch.bernoulli(mask_probs_actions_rew).long()
mask_rew = torch.bernoulli(mask_probs_actions_rew).long()
return (
(obses * mask_obses.unsqueeze(-1)) + (1 - mask_obses.unsqueeze(-1)) * self.modality_mask_emb(
mask_obses),
(actions * mask_actions.unsqueeze(-1)) + (1 - mask_actions.unsqueeze(-1)) * self.modality_mask_emb(
mask_actions),
(rew * mask_rew.unsqueeze(-1)) + (1 - mask_rew.unsqueeze(-1)) * self.modality_mask_emb(mask_rew),
), (mask_obses, mask_actions, mask_rew)
def add_modality_sep(self, obses, actions, rew):
mod_obses = self.modality_mask_emb(torch.ones(obses.shape[:-1], dtype=torch.long, device=self.device))
mod_actions = self.modality_mask_emb(torch.ones(actions.shape[:-1], dtype=torch.long, device=self.device) * 2)
mod_rew = self.modality_mask_emb(torch.ones(rew.shape[:-1], dtype=torch.long, device=self.device) * 3)
return obses + mod_obses, actions + mod_actions, rew + mod_rew
def add_time_sep(self, obses, actions, rew):
actions_rew_time = self.time_emb.unsqueeze(0).to(self.device)
obses_time = actions_rew_time.unsqueeze(-2)
return obses + obses_time, actions + actions_rew_time, rew + actions_rew_time
def concat_all(self, obses, actions, rew):
actions_new = actions.unsqueeze(2)
rew_new = rew.unsqueeze(2)
stack = torch.cat([obses, actions_new, rew_new], dim=2)
stack = torch.flatten(stack, start_dim=1, end_dim=2)
return stack
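    # Layout note: concat_all interleaves tokens per timestep as
    # [slot_1 ... slot_K, action, reward], so the flattened sequence holds
    # T * (num_slots + 2) tokens; the stride num_slots::num_slots+2 used in
    # forward() picks out exactly the action tokens.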
def sep_to_seq(self, obses, actions, rewards):
obses, actions, rewards = self.add_modality_sep(obses, actions, rewards)
obses, actions, rewards = self.add_time_sep(obses, actions, rewards)
return self.concat_all(obses, actions, rewards)
def pass_to_bert(self, seq):
return self.bert(seq)
def forward(self, obses, actions, rewards):
t_obses, t_actions, t_rewards, mse, ce = self.embed_sep(obses, actions, rewards)
(m_obses, m_actions, m_rewards), (bm_o, bm_a, bm_r) = self.mask_sep(t_obses, t_actions, t_rewards)
masked_tokens = self.sep_to_seq(m_obses, m_actions, m_rewards)
masks = 1 - self.concat_all(bm_o, bm_a, bm_r) # mask = 0 should be included in loss
new_tokens = self.pass_to_bert(masked_tokens)
bert_mse = torch.mean((new_tokens - masked_tokens) ** 2 * masks.unsqueeze(-1))
# if self.detach:
# new_tokens = new_tokens.detach()
# TODO: check loss is correct
new_ttokens = new_tokens[:, self.slate.num_slots::self.slate.num_slots + 2]
actions_time = self.time_emb.unsqueeze(0).to(self.device)
mod_actions = self.modality_mask_emb(torch.ones(new_ttokens.shape[:-1],
dtype=torch.long, device=self.device) * 2)
new_ttokens = new_ttokens - actions_time - mod_actions
if self.detach:
new_ttokens = new_ttokens.detach()
new_actions = self.act_ff(new_ttokens)
loss = self.act_loss(new_actions.flatten(0, 1), actions.flatten(0, 1))
# END OF TD
return new_tokens, (mse, ce, bert_mse, loss)
@torch.no_grad()
def inv_din_inference(self, obses, actions, rewards):
losses = {}
# we should mask all rew
# we should mask last 2 actions
# we should not mask obses
# meaningful actions: last 2 obses are different
meaningful = torch.abs(obses[:, -1] - obses[:, -2])
meaningful = torch.max(torch.flatten(meaningful, 1), 1).values
        meaningful = torch.ne(meaningful, torch.zeros_like(meaningful))  # True where the last two obses differ
t_obses, t_actions, t_rewards, _, _, = self.embed_sep(obses, actions, rewards)
mask_obses = torch.ones(t_obses.shape[:-1], device=self.device).long()
mask_rew = torch.zeros(t_rewards.shape[:-1], device=self.device).long()
mask_actions = torch.ones(t_actions.shape[:-1], device=self.device).long()
mask_actions[:, -1] = 0
mask_actions[:, -2] = 0
m_obses, m_actions, m_rewards = (
(t_obses * mask_obses.unsqueeze(-1)) + (1 - mask_obses.unsqueeze(-1)) * self.modality_mask_emb(
mask_obses),
(t_actions * mask_actions.unsqueeze(-1)) + (1 - mask_actions.unsqueeze(-1)) * self.modality_mask_emb(
mask_actions),
(t_rewards * mask_rew.unsqueeze(-1)) + (1 - mask_rew.unsqueeze(-1)) * self.modality_mask_emb(mask_rew),
)
masked_tokens = self.sep_to_seq(m_obses, m_actions, m_rewards)
new_tokens = self.pass_to_bert(masked_tokens)
new_ttokens = new_tokens[:, self.slate.num_slots::self.slate.num_slots + 2]
actions_time = self.time_emb.unsqueeze(0).to(self.device)
mod_actions = self.modality_mask_emb(torch.ones(new_ttokens.shape[:-1],
dtype=torch.long, device=self.device) * 2)
new_ttokens = new_ttokens - actions_time - mod_actions
old_ttokens = masked_tokens[:, self.slate.num_slots::self.slate.num_slots + 2]
old_ttokens = old_ttokens - actions_time - mod_actions
new_actions = self.act_ff(new_ttokens)
new_action_emb = new_ttokens[:, -2]
old_action_emb = old_ttokens[:, -2]
losses['mse'] = torch.mean((new_action_emb - old_action_emb) ** 2)
losses['meaningful mse'] = torch.mean(((new_action_emb - old_action_emb)[meaningful]) ** 2)
distance = torch.norm(new_action_emb.unsqueeze(1) - self.action_emb.weight.data.unsqueeze(0), dim=2)
nearest = torch.argmin(distance, dim=1)
new_action = new_actions[:, -2]
old_action = actions[:, -2]
losses['cross entropy'] = self.act_loss(new_action, old_action)
        losses['meaningful cross entropy'] = self.act_loss(new_action[meaningful],
old_action[meaningful])
new_action_max = torch.max(new_action, dim=1).indices
losses['accuracy'] = torch.sum(torch.eq(old_action, new_action_max)) / (old_action.shape[0])
        losses['meaningful accuracy'] = torch.sum(torch.eq(old_action[meaningful],
new_action_max[meaningful])) / (
old_action[meaningful].shape[0])
losses['nearest accuracy'] = torch.sum(torch.eq(old_action, nearest)) / (old_action.shape[0])
        losses['nearest meaningful accuracy'] = torch.sum(torch.eq(old_action[meaningful],
nearest[meaningful])) / (
old_action[meaningful].shape[0])
return losses
@torch.no_grad()
def forw_din_inference(self, obses, actions, rewards):
# we should mask all rew
# we should not mask actions
# we should mask last obs
losses = {}
t_obses, t_actions, t_rewards, _, _, recon, attns = self.embed_sep(obses, actions, rewards, return_add=True)
mask_obses = torch.ones(t_obses.shape[:-1], device=self.device).long()
mask_rew = torch.zeros(t_rewards.shape[:-1], device=self.device).long()
mask_actions = torch.ones(t_actions.shape[:-1], device=self.device).long()
mask_obses[:, -1] = 0
m_obses, m_actions, m_rewards = (
(t_obses * mask_obses.unsqueeze(-1)) + (1 - mask_obses.unsqueeze(-1)) * self.modality_mask_emb(
mask_obses),
(t_actions * mask_actions.unsqueeze(-1)) + (1 - mask_actions.unsqueeze(-1)) * self.modality_mask_emb(
mask_actions),
(t_rewards * mask_rew.unsqueeze(-1)) + (1 - mask_rew.unsqueeze(-1)) * self.modality_mask_emb(mask_rew),
)
masked_tokens = self.sep_to_seq(m_obses, m_actions, m_rewards)
new_tokens = self.pass_to_bert(masked_tokens)
new_tokens = new_tokens.unflatten(1, (-1, self.slate.num_slots + 2))
old_tokens = masked_tokens.unflatten(1, (-1, self.slate.num_slots + 2))
new_slots = new_tokens[:, -1, :-2]
old_slots = old_tokens[:, -1, :-2]
losses['mse slots'] = torch.mean((new_slots - old_slots) ** 2)
new_slots_deemb = new_slots - self.modality_mask_emb(torch.ones(new_slots.shape[:-1],
dtype=torch.long,
device=self.device))
new_slots_deemb = new_slots_deemb - self.time_emb.to(self.device).unsqueeze(-2)[-1:]
old_slots_deemb = old_slots - self.modality_mask_emb(torch.ones(old_slots.shape[:-1],
dtype=torch.long,
device=self.device))
old_slots_deemb = old_slots_deemb - self.time_emb.to(self.device).unsqueeze(-2)[-1:]
reconstruct = self.slate.reconstruct_slots(new_slots_deemb)
reconstruct_old = self.slate.reconstruct_slots(old_slots_deemb)
losses['mse images slate-bert'] = torch.mean((reconstruct - reconstruct_old) ** 2)
losses['mse images gt-slate'] = torch.mean((obses[:, -1] - reconstruct_old) ** 2)
losses['mse images gt-bert'] = torch.mean((reconstruct - obses[:, -1]) ** 2)
reconstruct = torch.cat([obses[:16, -1], recon[:16, -1], reconstruct[:16], reconstruct_old[:16]], dim=0)
grid = vutils.make_grid(reconstruct, nrow=16, pad_value=0.2)[:, 2:-2, 2:-2]
attns_grid = vutils.make_grid(torch.flatten(attns[:16, -1], 0, 1), nrow=16, pad_value=0.2)[:, 2:-2, 2:-2]
losses['visualisation'] = wandb.Image(grid)
losses['attns'] = wandb.Image(attns_grid)
return losses
|
Shimanogov/bert-slots
|
model.py
|
model.py
|
py
| 12,552 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.arange",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.einsum",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cos",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.concatenate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.TransformerEncoderLayer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.TransformerEncoder",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.GELU",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.flatten",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.unflatten",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.unflatten",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.flatten",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "torch.abs",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.flatten",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "torch.mean",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.argmin",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "torch.mean",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "torch.flatten",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "wandb.Image",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "wandb.Image",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 183,
"usage_type": "call"
}
] |
27214693785
|
"""
Overview:
Functions to deal with encoding binary data easily.
"""
import sys
from typing import Optional, List
import chardet
from ..collection import unique
_DEFAULT_ENCODING = 'utf-8'
_DEFAULT_PREFERRED_ENCODINGS = ['utf-8', 'gbk', 'gb2312', 'gb18030', 'big5']  # common encodings for Chinese
__all__ = [
'auto_decode'
]
def _decode(data: bytes, encoding: str) -> str:
return data.decode(encoding)
def auto_decode(data: bytes, encoding: Optional[str] = None, prefers: Optional[List[str]] = None) -> str:
r"""
Overview:
        Auto decode binary data to a string; the encoding is detected automatically when not specified.
Arguments:
- data (:obj:`bytes`): Original binary data to be decoded.
        - encoding (:obj:`Optional[str]`): Encoding to use; default is ``None``, which \
            means the function detects the encoding automatically.
        - prefers (:obj:`Optional[List[str]]`): Preferred encodings.
Returns:
- str (:obj:`str`): Decoded string.
Examples::
>>> auto_decode(b'kdsfjldsjflkdsmgds') # 'kdsfjldsjflkdsmgds'
>>> auto_decode(b'\xd0\x94\xd0\xbe\xd0\xb1\xd1\x80\xd1\x8b\xd0\xb9 \xd0'
... b'\xb2\xd0\xb5\xd1\x87\xd0\xb5\xd1\x80') # "Добрый вечер"
>>> auto_decode(b'\xa4\xb3\xa4\xf3\xa4\xd0\xa4\xf3\xa4\xcf') # "こんばんは"
>>> auto_decode(b'\xcd\xed\xc9\xcf\xba\xc3') # "晚上好"
"""
if encoding:
return _decode(data, encoding)
else:
if prefers is None:
prefers = _DEFAULT_PREFERRED_ENCODINGS
_elist = filter(bool, unique([
*prefers,
sys.getdefaultencoding(),
chardet.detect(data)['encoding']
]))
last_err = None
for enc in _elist:
try:
return _decode(data, enc)
except UnicodeDecodeError as err:
if last_err is None or err.start > last_err.start:
last_err = err
raise last_err
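# Usage note (sketch): passing prefers biases detection, e.g.
# auto_decode(data, prefers=['big5']) tries Big5 first, then the system
# default encoding and chardet's guess; if every candidate fails, the
# UnicodeDecodeError that decoded the furthest is re-raised.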
|
HansBug/hbutils
|
hbutils/encoding/decode.py
|
decode.py
|
py
| 2,040 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "collection.unique",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sys.getdefaultencoding",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "chardet.detect",
"line_number": 54,
"usage_type": "call"
}
] |
6923620355
|
#encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import StringIO
import json
import logging
import random
import urllib
import urllib2
# functions
import responseHandler
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# global variables
from config import TOKEN
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
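# Sketch: EnableStatus keys its datastore rows by str(chat_id), so
# setEnabled(12345, True) upserts one row per chat and getEnabled(12345)
# reads it back, defaulting to False for chats never seen before.
# (12345 is an illustrative chat id.)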
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
location = message.get('location')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
# location - weather
if location:
lat = location.get('latitude')
lon = location.get('longitude')
weather = responseHandler.locationInput(lat, lon)
responseHandler.sendWeather(chat_id, weather)
return
# command
if text:
if text.startswith('/'):
if text.lower() == '/start':
                    responseHandler.sendTextMessage(chat_id, '幸会。')  # "Nice to meet you."
setEnabled(chat_id, True)
                # TODO: ideally turn this into a nightly scheduled job
elif text.lower() == '/weathertmr':
LAT = 57.63
LON = 18.31
weather = responseHandler.locationInput(LAT, LON)
responseHandler.forecastWeather(chat_id, weather)
elif text.lower() == '/stop':
                    responseHandler.sendTextMessage(chat_id, '好,下次再说。')  # "OK, see you next time."
setEnabled(chat_id, False)
else:
                    responseHandler.sendTextMessage(chat_id, '什么?')  # "What?"
else:
                responseHandler.replyMessage(chat_id, message_id, '嗯')  # "Mm-hm."
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
|
eglantine-shell/xiaoyiqingbot-2022
|
main.py
|
main.py
|
py
| 3,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.setdefaultencoding",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.ndb.Model",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.ndb",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.ndb.BooleanProperty",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.ndb",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.urlfetch.set_default_fetch_deadline",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.urlfetch.set_default_fetch_deadline",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.urlfetch.set_default_fetch_deadline",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "urllib.urlencode",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.urlfetch.set_default_fetch_deadline",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "responseHandler.locationInput",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "responseHandler.sendWeather",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "responseHandler.sendTextMessage",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "responseHandler.locationInput",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "responseHandler.forecastWeather",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "responseHandler.sendTextMessage",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "responseHandler.sendTextMessage",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "responseHandler.replyMessage",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "webapp2.WSGIApplication",
"line_number": 113,
"usage_type": "call"
}
] |
6420520466
|
import time
import datetime
import math
import logging
class Logger():
def __init__(self):
        self.start_time = time.time()
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
print('Starting ' + str(datetime.datetime.now()))
@staticmethod
def printLog(*messages, no_time = False, logging_level='INFO'):
message = Logger.unwrapMessage(messages)
if no_time:
print(message)
else:
print(str(datetime.datetime.now()) + '\t' + message)
@staticmethod
    def unwrapMessage(*messages):
        # printLog passes its whole args tuple through, so the caller's values arrive as messages[0]
message = ''
for m in messages[0]:
message += str(m) + ' '
return message
def getElapsedTime(self):
time_min, str_report = self.calculateElapsedTime()
print(str_report)
return time_min
def calculateElapsedTime(self):
totalSeconds = time.time() - self.start_time
hours = math.floor(totalSeconds / 3600)
minutes = math.floor(totalSeconds / 60 - hours * 60)
seconds = totalSeconds - (hours * 3600 + minutes * 60)
endDate = datetime.datetime.now()
str_report = 'Time: ' + str(endDate)
str_report += '\n' + "--- Total Time: %s hours: %s minutes %s seconds " % (str(hours), str(minutes), str(seconds))
time_min = int((hours * 60 + minutes + seconds /60)*100)/100
return time_min, str_report
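# Minimal usage sketch (not part of the original module):
#   log = Logger()                       # prints "Starting <timestamp>"
#   Logger.printLog("step", 1, "done")   # timestamped line built by unwrapMessage
#   minutes = log.getElapsedTime()       # prints the elapsed-time report, returns minutes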
|
Script-2020/autoclusteringFinReports
|
util/Logger.py
|
Logger.py
|
py
| 1,437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "attribute"
}
] |
16484762613
|
import pygame
import time
import random
pygame.init()
screensize = (200,200) # This is a Vector 2 Dimentional Object
screen = pygame.display.set_mode(screensize)
run = True
color = (250, 153, 0)
displacement = 0
x_pos = 200
x_pos_2 = 300
y_pos = 95
pipeno = 0
pipeno2 = 0
gamepipes = 10
loclist = []
for a in range(gamepipes):
loclist.append(random.randint(-20,40))
loclist2 = []
for b in range(gamepipes):
loclist2.append(random.randint(-20,40))
while run:
for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False  # end the loop; quitting pygame here would crash the draw calls below
screen.fill(color)
pygame.draw.rect(screen, (255,255,255), pygame.Rect(95, y_pos, 20, 20))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos, loclist[pipeno], 20, 60))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos, 120 + loclist[pipeno], 20, 60))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos_2, loclist2[pipeno2], 20, 60))
pygame.draw.rect(screen, (255,255,255), pygame.Rect(x_pos_2, 120 + loclist2[pipeno2], 20, 60))
displacement = 3
x_pos -= displacement
x_pos_2 -= displacement
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
y_pos -= 8
y_pos += 2
if x_pos <= 0:
x_pos = 200
        pipeno = (pipeno + 1) % gamepipes  # wrap so loclist[pipeno] never runs past the pre-generated pipes
if x_pos_2 <= 0:
x_pos_2 = 200
        pipeno2 = (pipeno2 + 1) % gamepipes  # same wrap for the second pipe list
if y_pos >= 200:
run = False
print("game over!")
rect1 = pygame.Rect(95, y_pos, 20, 20)
rect2 = pygame.Rect(x_pos, loclist[pipeno], 20, 60)
rect3 = pygame.Rect(x_pos, 120 + loclist[pipeno], 20, 60)
rect4 = pygame.Rect(x_pos_2, loclist2[pipeno2], 20, 60)
rect5 = pygame.Rect(x_pos_2, 120 + loclist2[pipeno2], 20, 60)
collideTest1 = rect1.colliderect(rect2)
collideTest2 = rect1.colliderect(rect3)
collideTest3 = rect1.colliderect(rect4)
collideTest4 = rect1.colliderect(rect5)
if collideTest1 == 1 or collideTest2 == 1 or collideTest3 == 1 or collideTest4 == 1:
print("game over")
run = False
time.sleep(0.033)
pygame.display.update()
print(f"Total Pipes: {pipeno + pipeno2}")
|
RinUnderscore/LSCC-Pygame-Lesson
|
main.py
|
main.py
|
py
| 1,965 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.event.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 85,
"usage_type": "attribute"
}
] |
1790565048
|
import pdfplumber
import pandas as pd
from babel.numbers import format_currency
def extrair_tabelas(pdf_path):
with pdfplumber.open(pdf_path) as pdf:
        # Initialize a list to hold every table
        todas_tabelas = []
        # Iterate over every page of the PDF
        for pagina in pdf.pages:
            # Extract the tables from the current page
            tabelas_pagina = pagina.extract_tables()
            # Append them to the list
            todas_tabelas.extend(tabelas_pagina)
    # Return the list of tables
    return todas_tabelas
# Replace 'caminho_para_seu_pdf.pdf' with the real path to your PDF file
caminho_pdf = 'fatura-pdf/xp3.pdf'
tabelas = extrair_tabelas(caminho_pdf)
gastos = []
# Print the tables
for i, tabela in enumerate(tabelas):
    # Check that the table was extracted correctly and contains the string "Data"
if tabela and any("Data" in row for row in tabela):
df = pd.DataFrame(tabela[1:], columns=tabela[0])
        # Walk the DataFrame row by row, column by column
for indice, linha in df.iterrows():
            # Check whether the first column is in date format (22/10/23)
if pd.to_datetime(linha.iloc[0], errors='coerce', dayfirst=True, format='%d/%m/%y') and pd.notna(pd.to_datetime(linha.iloc[0], errors='coerce', dayfirst=True, format='%d/%m/%y')):
gastos.append([linha.iloc[0], linha.iloc[1], linha.iloc[2]])
# Print the expense list in a more readable form
total = 0.0
for i, gasto in enumerate(gastos):
if "Pagamentos Validos Normais" not in gasto[1]:
print(f"{gasto[0]} {gasto[1]} {gasto[2]}")
valor = gasto[2].replace('.', '').replace(',', '.')
total += float(valor)
# Format the invoice total in Brazilian reais (R$) using babel
total_formatado = format_currency(total, 'BRL', locale='pt_BR')
print("Total da fatura:", total_formatado)
|
regis-amaral/python-scripts
|
fatura-pdf/reader-fatura.py
|
reader-fatura.py
|
py
| 1,927 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pdfplumber.open",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.notna",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "babel.numbers.format_currency",
"line_number": 48,
"usage_type": "call"
}
] |
40128810884
|
#!/usr/bin/env python3
import itertools
from collections import defaultdict
from heapq import heappush, heappop
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.buffer.readline
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
def debug(*x):
print(*x, file=sys.stderr)
def blute_solve(N, AS):
"void()"
buf = []
def blute(xs, buf):
debug("blute: xs, buf", xs, buf)
if not xs:
return 0
if not buf:
# first player score 0
return blute(xs[1:], [xs[0]])
# insert
candidate = []
for i in range(len(buf)):
s = min(buf[i - 1], buf[i])
newBuf = buf[:]
newBuf.insert(0, xs[0])
candidate.append(blute(xs[1:], newBuf) + s)
return max(candidate)
candidate = []
for xs in itertools.permutations(range(N)):
candidate.append(blute(xs, buf))
return max(candidate)
def solve(N, AS):
buf = []
AS.sort(reverse=True)
ret = AS[0]
for i in range(N - 2):
ret += AS[1 + i // 2]
return ret
def main():
N = int(input())
AS = list(map(int, input().split()))
print(solve(N, AS))
T1 = """
4
2 2 1 3
"""
def test_T1():
"""
>>> as_input(T1)
>>> main()
7
"""
T0 = """
3
3 2 1
"""
def test_T0():
"""
>>> as_input(T0)
>>> main()
5
"""
T2 = """
7
1 1 1 1 1 1 1
"""
def test_T2():
"""
>>> as_input(T2)
>>> main()
6
"""
def _test():
import doctest
doctest.testmod()
def as_input(s):
"use in test, use given string as input file"
import io
global read, input
f = io.StringIO(s.strip())
def input():
return bytes(f.readline(), "ascii")
def read():
return bytes(f.read(), "ascii")
USE_NUMBA = False
if (USE_NUMBA and sys.argv[-1] == 'ONLINE_JUDGE') or sys.argv[-1] == '-c':
print("compiling")
from numba.pycc import CC
cc = CC('my_module')
cc.export('solve', solve.__doc__.strip().split()[0])(solve)
cc.compile()
exit()
else:
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if (USE_NUMBA and sys.argv[-1] != '-p') or sys.argv[-1] == "--numba":
# -p: pure python mode
# if not -p, import compiled module
from my_module import solve # pylint: disable=all
elif sys.argv[-1] == "-t":
_test()
sys.exit()
elif sys.argv[-1] != '-p' and len(sys.argv) == 2:
# input given as file
input_as_file = open(sys.argv[1])
input = input_as_file.buffer.readline
read = input_as_file.buffer.read
main()
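# Worked example of the greedy in solve() (comments only; the script above reads stdin):
# sort descending, the first arrival scores 0, and the greedy realizes the scores
# A[0], A[1], A[1], A[2], A[2], ...  For T1, AS = [2, 2, 1, 3]:
#   sorted desc -> [3, 2, 2, 1]; ret = 3, then i=0 -> AS[1] = 2, i=1 -> AS[1] = 2; total 7.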
|
nishio/atcoder
|
abc173/d.py
|
d.py
|
py
| 2,643 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.setrecursionlimit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "itertools.permutations",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "doctest.testmod",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "numba.pycc.CC",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 139,
"usage_type": "attribute"
}
] |
16543818867
|
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import (
makeExpressionMakeTuple,
makeExpressionMakeTupleOrConstant,
)
from nuitka.nodes.DictionaryNodes import makeExpressionMakeDictOrConstant
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionRef,
makeExpressionFunctionCall,
makeExpressionFunctionCreation,
)
from nuitka.nodes.KeyValuePairNodes import makeExpressionPairs
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableAssignNodes import makeStatementAssignmentVariable
from nuitka.nodes.VariableRefNodes import ExpressionTempVariableRef
from nuitka.PythonVersions import python_version
from .ComplexCallHelperFunctions import (
getFunctionCallHelperDictionaryUnpacking,
getFunctionCallHelperKeywordsStarDict,
getFunctionCallHelperKeywordsStarList,
getFunctionCallHelperKeywordsStarListStarDict,
getFunctionCallHelperPosKeywordsStarDict,
getFunctionCallHelperPosKeywordsStarList,
getFunctionCallHelperPosKeywordsStarListStarDict,
getFunctionCallHelperPosStarDict,
getFunctionCallHelperPosStarList,
getFunctionCallHelperPosStarListStarDict,
getFunctionCallHelperStarDict,
getFunctionCallHelperStarList,
getFunctionCallHelperStarListStarDict,
)
from .ReformulationDictionaryCreation import buildDictionaryUnpackingArgs
from .ReformulationSequenceCreation import buildListUnpacking
from .TreeHelpers import (
buildNode,
buildNodeTuple,
getKind,
makeStatementsSequenceFromStatements,
)
def buildCallNode(provider, node, source_ref):
called = buildNode(provider, node.func, source_ref)
if python_version >= 0x350:
list_star_arg = None
dict_star_arg = None
positional_args = []
# For Python3.5 compatibility, the error handling with star argument last
# is the old one, only with a starred argument before that, things use the
# new unpacking code.
for node_arg in node.args[:-1]:
if getKind(node_arg) == "Starred":
assert python_version >= 0x350
list_star_arg = buildListUnpacking(provider, node.args, source_ref)
positional_args = ()
break
else:
if node.args and getKind(node.args[-1]) == "Starred":
assert python_version >= 0x350
list_star_arg = buildNode(provider, node.args[-1].value, source_ref)
positional_args = buildNodeTuple(provider, node.args[:-1], source_ref)
else:
positional_args = buildNodeTuple(provider, node.args, source_ref)
# Only the values of keyword pairs have a real source ref, and those only
# really matter, so that makes sense.
keys = []
values = []
for keyword in node.keywords[:-1]:
if keyword.arg is None:
assert python_version >= 0x350
outline_body = ExpressionOutlineBody(
provider=provider, name="dict_unpacking_call", source_ref=source_ref
)
tmp_called = outline_body.allocateTempVariable(
temp_scope=None, name="called"
)
helper_args = [
ExpressionTempVariableRef(variable=tmp_called, source_ref=source_ref),
makeExpressionMakeTuple(
elements=buildDictionaryUnpackingArgs(
provider=provider,
keys=(keyword.arg for keyword in node.keywords),
values=(keyword.value for keyword in node.keywords),
source_ref=source_ref,
),
source_ref=source_ref,
),
]
dict_star_arg = makeExpressionFunctionCall(
function=makeExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getFunctionCallHelperDictionaryUnpacking(),
source_ref=source_ref,
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=source_ref,
),
values=helper_args,
source_ref=source_ref,
)
outline_body.setChildBody(
makeStatementsSequenceFromStatements(
makeStatementAssignmentVariable(
variable=tmp_called, source=called, source_ref=source_ref
),
StatementReturn(
expression=_makeCallNode(
called=ExpressionTempVariableRef(
variable=tmp_called, source_ref=source_ref
),
positional_args=positional_args,
keys=keys,
values=values,
list_star_arg=list_star_arg,
dict_star_arg=dict_star_arg,
source_ref=source_ref,
),
source_ref=source_ref,
),
)
)
return outline_body
# For Python3.5 compatibility, the error handling with star argument last
# is the old one, only with a starred argument before that, things use the
# new unpacking code.
if node.keywords and node.keywords[-1].arg is None:
assert python_version >= 0x350
dict_star_arg = buildNode(provider, node.keywords[-1].value, source_ref)
keywords = node.keywords[:-1]
else:
keywords = node.keywords
for keyword in keywords:
keys.append(
makeConstantRefNode(
constant=keyword.arg, source_ref=source_ref, user_provided=True
)
)
values.append(buildNode(provider, keyword.value, source_ref))
if python_version < 0x350:
list_star_arg = buildNode(provider, node.starargs, source_ref, True)
dict_star_arg = buildNode(provider, node.kwargs, source_ref, True)
return _makeCallNode(
called=called,
positional_args=positional_args,
keys=keys,
values=values,
list_star_arg=list_star_arg,
dict_star_arg=dict_star_arg,
source_ref=source_ref,
)
def _makeCallNode(
called, positional_args, keys, values, list_star_arg, dict_star_arg, source_ref
):
# Many variables, but only to cover the many complex call cases.
if list_star_arg is None and dict_star_arg is None:
result = makeExpressionCall(
called=called,
args=makeExpressionMakeTupleOrConstant(
elements=positional_args,
user_provided=True,
source_ref=source_ref,
),
kw=makeExpressionMakeDictOrConstant(
makeExpressionPairs(keys=keys, values=values),
user_provided=True,
source_ref=source_ref,
),
source_ref=source_ref,
)
# Bug compatible line numbers before Python 3.8
if python_version < 0x380:
if values:
result.setCompatibleSourceReference(
source_ref=values[-1].getCompatibleSourceReference()
)
elif positional_args:
result.setCompatibleSourceReference(
source_ref=positional_args[-1].getCompatibleSourceReference()
)
return result
else:
# Dispatch to complex helper function for each case. These do
# re-formulation of complex calls according to Developer Manual.
key = (
bool(positional_args),
bool(keys),
list_star_arg is not None,
dict_star_arg is not None,
)
table = {
(True, True, True, False): getFunctionCallHelperPosKeywordsStarList,
(True, False, True, False): getFunctionCallHelperPosStarList,
(False, True, True, False): getFunctionCallHelperKeywordsStarList,
(False, False, True, False): getFunctionCallHelperStarList,
(True, True, False, True): getFunctionCallHelperPosKeywordsStarDict,
(True, False, False, True): getFunctionCallHelperPosStarDict,
(False, True, False, True): getFunctionCallHelperKeywordsStarDict,
(False, False, False, True): getFunctionCallHelperStarDict,
(True, True, True, True): getFunctionCallHelperPosKeywordsStarListStarDict,
(True, False, True, True): getFunctionCallHelperPosStarListStarDict,
(False, True, True, True): getFunctionCallHelperKeywordsStarListStarDict,
(False, False, True, True): getFunctionCallHelperStarListStarDict,
}
get_helper = table[key]
helper_args = [called]
if positional_args:
helper_args.append(
makeExpressionMakeTupleOrConstant(
elements=positional_args,
user_provided=True,
source_ref=source_ref,
)
)
# Order of evaluation changed in Python3.5.
if python_version >= 0x350 and list_star_arg is not None:
helper_args.append(list_star_arg)
if keys:
helper_args.append(
makeExpressionMakeDictOrConstant(
pairs=makeExpressionPairs(keys=keys, values=values),
user_provided=True,
source_ref=source_ref,
)
)
# Order of evaluation changed in Python3.5.
if python_version < 0x350 and list_star_arg is not None:
helper_args.append(list_star_arg)
if dict_star_arg is not None:
helper_args.append(dict_star_arg)
result = makeExpressionFunctionCall(
function=makeExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=get_helper(), source_ref=source_ref
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=source_ref,
),
values=helper_args,
source_ref=source_ref,
)
# Bug compatible line numbers before Python 3.8
if python_version < 0x380:
result.setCompatibleSourceReference(
source_ref=helper_args[-1].getCompatibleSourceReference()
)
return result
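# A minimal standalone sketch of the four-flag dispatch used in _makeCallNode above:
# the presence of positional args, keyword args, *args and **kwargs picks one of the
# twelve complex-call helpers. Names below are hypothetical stand-ins, not Nuitka APIs.
def _sketch_pick_helper(has_pos, has_kw, has_star_list, has_star_dict):
    table = {
        (True, False, True, False): "pos_star_list",
        (False, False, True, False): "star_list",
        (True, True, True, True): "pos_keywords_star_list_star_dict",
        # ... the remaining nine combinations follow the same pattern ...
    }
    return table[(has_pos, has_kw, has_star_list, has_star_dict)]

# e.g. f(a, *args) dispatches as _sketch_pick_helper(True, False, True, False) -> "pos_star_list"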
|
Nuitka/Nuitka
|
nuitka/tree/ReformulationCallExpressions.py
|
ReformulationCallExpressions.py
|
py
| 10,742 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
[
{
"api_name": "TreeHelpers.buildNode",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "TreeHelpers.getKind",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "ReformulationSequenceCreation.buildListUnpacking",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "TreeHelpers.getKind",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "TreeHelpers.buildNode",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "TreeHelpers.buildNodeTuple",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "TreeHelpers.buildNodeTuple",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "nuitka.nodes.OutlineNodes.ExpressionOutlineBody",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.VariableRefNodes.ExpressionTempVariableRef",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.ContainerMakingNodes.makeExpressionMakeTuple",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "ReformulationDictionaryCreation.buildDictionaryUnpackingArgs",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.FunctionNodes.makeExpressionFunctionCall",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.FunctionNodes.makeExpressionFunctionCreation",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.FunctionNodes.ExpressionFunctionRef",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperDictionaryUnpacking",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "TreeHelpers.makeStatementsSequenceFromStatements",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.VariableAssignNodes.makeStatementAssignmentVariable",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.ReturnNodes.StatementReturn",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.VariableRefNodes.ExpressionTempVariableRef",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "TreeHelpers.buildNode",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.ConstantRefNodes.makeConstantRefNode",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "TreeHelpers.buildNode",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "TreeHelpers.buildNode",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "TreeHelpers.buildNode",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.CallNodes.makeExpressionCall",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.ContainerMakingNodes.makeExpressionMakeTupleOrConstant",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.DictionaryNodes.makeExpressionMakeDictOrConstant",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.KeyValuePairNodes.makeExpressionPairs",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperPosKeywordsStarList",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperPosStarList",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperKeywordsStarList",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperStarList",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperPosKeywordsStarDict",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperPosStarDict",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperKeywordsStarDict",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperStarDict",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperPosKeywordsStarListStarDict",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperPosStarListStarDict",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperKeywordsStarListStarDict",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "ComplexCallHelperFunctions.getFunctionCallHelperStarListStarDict",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "nuitka.nodes.ContainerMakingNodes.makeExpressionMakeTupleOrConstant",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "nuitka.nodes.DictionaryNodes.makeExpressionMakeDictOrConstant",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.KeyValuePairNodes.makeExpressionPairs",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "nuitka.nodes.FunctionNodes.makeExpressionFunctionCall",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.FunctionNodes.makeExpressionFunctionCreation",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "nuitka.nodes.FunctionNodes.ExpressionFunctionRef",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "nuitka.PythonVersions.python_version",
"line_number": 283,
"usage_type": "name"
}
] |
15792066200
|
import argparse
from os import listdir, makedirs
from os.path import isfile, join, basename, dirname, isdir
from PIL import Image
from tqdm import tqdm
# folder_path = 'photos'
# left, top, right, bottom = 559, 225, 1361, 0
# -d ./photos -s ./photos2 -c -a 559 225 1361 0
def build_argparse():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help = 'perform the action on all photos in a directory', type = str,
default = False)
parser.add_argument('-f', '--file', help = 'perform the action on one photo', type = str, default = False)
parser.add_argument('-n', '--name', help = 'new file name', type = str, default = '')
parser.add_argument('-s', '--save', help = 'destination directory to save the new photos', type = str,
default = False)
parser.add_argument('-c', '--crop', help = 'crop the image(s) in a rectangle', action = 'store_true')
parser.add_argument('-a', '--area', help = 'define the rectangle to crop, the order of sequence is: left, top, '
'right, bottom', type = int, nargs = 4, default = [0, 0, 0, 0])
parser.add_argument('-l', '--left', help = 'the left pixel from to crop', type = int, default = 0)
parser.add_argument('-t', '--top', help = 'the top pixel from to crop', type = int, default = 0)
parser.add_argument('-r', '--right', help = 'the right pixel from to crop', type = int, default = 0)
parser.add_argument('-b', '--bottom', help = 'the bottom pixel from to crop', type = int, default = 0)
args = parser.parse_args()
return args
def photo_crop(image_path, save_to_path, left = 0, top = 0, right = 0, bottom = 0, number = 0, new_file_name = ''):
with Image.open(image_path) as image:
image_colors = image.load()
if left == 0 and right == 0 and top == 0 and bottom == 0:
right, bottom = image.size
if bottom == 0:
for y in range(400, image.size[1]):
                if image_colors[1358, y] == (181, 181, 181, 255):  # hard-coded probe column and border color
bottom = y
bottom += 2
cropped_image = image.crop((left, top, right, bottom))
        if new_file_name == '':
            cropped_image.save(join(save_to_path, basename(image_path)))
        else:
            # build the numeric suffix first: '{:02d}' would raise on the empty string when number == 0
            suffix = '{:02d}'.format(number) if number > 0 else ''
            if '.' in new_file_name:
                cropped_image.save(join(save_to_path, '{}{}.{}'.format(new_file_name.split('.')[0], suffix, basename(new_file_name).split('.')[1])))
            else:
                cropped_image.save(join(save_to_path, '{}{}.{}'.format(new_file_name.split('.')[0], suffix, basename(image_path).split('.')[1])))
def all_files_in_folder(folder_path):
return [join(folder_path, f) for f in listdir(folder_path) if isfile(join(folder_path, f))]
def main():
args = build_argparse()
if args.crop:
if args.area != [0, 0, 0, 0]:
left, top, right, bottom = args.area
else:
left = args.left
top = args.top
right = args.right
bottom = args.bottom
if args.directory:
if args.save:
path_to_save = args.save
if not isdir(path_to_save):
makedirs(path_to_save)
else:
path_to_save = args.directory
for i, image_path in enumerate(tqdm(all_files_in_folder(args.directory))):
photo_crop(image_path, path_to_save, left, top, right, bottom, i+1, args.name)
print("The operations are completed check the {} folder for the photos.".format(path_to_save))
elif args.file:
if args.save:
path_to_save = args.save
if not isdir(path_to_save):
makedirs(path_to_save)
else:
path_to_save = dirname(args.file)
photo_crop(args.file, path_to_save, left, top, right, bottom, 0, args.name)
print("The operation is completed check the {} folder for the photo.".format(path_to_save))
main()
|
zsoman/photo_editing
|
PhotoCropper.py
|
PhotoCropper.py
|
py
| 4,114 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 85,
"usage_type": "call"
}
] |
10241474430
|
import scipy
import copy
import numpy as np
def gesd(x, **kwargs):
x_ = np.array(x)
alpha = 0.05 if 'alpha' not in kwargs else kwargs['alpha']
n_out = int(np.ceil(len(x_) * 0.1)) if 'n_out' not in kwargs else kwargs['n_out']
outlier_side = 0 if 'outlier_side' not in kwargs else kwargs['outlier_side']
alpha_ = alpha / 2 if outlier_side == 0 else alpha
n = len(x_)
    temp = x_.copy()  # work on a copy: samples are NaN-ed out below, while x2 must come from the intact x_
R = np.zeros([n_out])
rm_idx = copy.copy(R).astype(int)
lam = copy.copy(R)
for j in range(n_out):
if outlier_side == -1:
sample = np.nanmin(temp)
rm_idx[j] = list(temp).index(sample)
R[j] = (np.nanmean(temp) - sample)
elif outlier_side == 0:
R[j] = np.nanmax(abs(temp - np.nanmean(temp)))
rm_idx[j] = np.argmax(abs(temp - np.nanmean(temp)))
else:
sample = np.nanmax(temp)
rm_idx[j] = list(temp).index(sample)
R[j] = (sample - np.nanmean(temp))
R[j] /= np.nanstd(temp)
temp[rm_idx[j]] = float('nan')
p = 1 - alpha_ / (n - j + 1)
t = scipy.stats.t.ppf(p, n - j - 1)
lam[j] = ((n - j) * t) / (np.sqrt((n - j - 1 + t ** 2) * (n - j + 1)))
    idx = np.zeros(n).astype(bool)
    exceed = list(R > lam)
    if True in exceed:
        # GESD flags every removal up to and including the last exceedance of the critical value
        last = len(exceed) - 1 - exceed[::-1].index(True)
        idx[rm_idx[0:last + 1]] = True
    x2 = x_[~idx]
    return idx, x2
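if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): one obvious outlier at 25.0.
    data = [10.1, 9.8, 10.0, 10.2, 9.9, 25.0]
    idx, cleaned = gesd(data, n_out=2, alpha=0.05, outlier_side=0)
    print(idx)      # boolean mask over the input, True where a sample was flagged
    print(cleaned)  # the input with flagged samples removed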
|
WHThhhh/Seeg_prepro
|
GESD_wht.py
|
GESD_wht.py
|
py
| 1,421 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.nanmin",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.nanstd",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scipy.stats.t.ppf",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
}
] |
41211788790
|
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
import os
import json
from PIL import Image
import requests
from io import BytesIO
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def cosine_similarity(ratings):
sim = ratings.dot(ratings.T)
if not isinstance(sim, np.ndarray):
sim = sim.toarray()
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
def main():
y_test = []
x_test = []
FILE_PATH = "/content/gdrive/MyDrive/TER/MoviesDataBase/movie_1202"
IMAGE_BASE_PATH = "https://image.tmdb.org/t/p/w500"
for movie in os.listdir(FILE_PATH):
if movie.split(".")[1] != "json":
continue
movie_id = movie.split('_')[1].split('.')[0]
fr = open(FILE_PATH + "/" + movie)
movie_model = json.load(fr)
fr.close()
if movie_model['poster_path']:
img_path = IMAGE_BASE_PATH + movie_model['poster_path']
html = requests.get(img_path, verify=False)
poster = Image.open(BytesIO(html.content))
poster_img = poster.crop()
if poster:
# img = image.load_img(poster_img, target_size=(224, 224))
img = poster_img.resize((224, 224))
# img.show()
y_test.append(movie_id)
x = image.img_to_array(img)
# print(movie_id)
# print(x[:,:,0])
# print(np.shape(x[:,:,0]))
# exit(0)
if np.shape(x)[2] == 1:
x = np.stack((x[:, :, 0],) * 3, axis=-1)
x = np.expand_dims(x, axis=0)
if len(x_test) > 0:
# print(np.shape(x_test))
# print(np.shape(x))
# exit(0)
x_test = np.concatenate((x_test, x))
else:
x_test = x
x_test = preprocess_input(x_test)
model = ResNet50(weights='imagenet', include_top=False)
features = model.predict(x_test)
# print(np.shape(features))
# print(len(y_test))
features_compress = features.reshape(len(y_test), 7 * 7 * 2048)
# print(np.shape(features_compress))
# sim = cosine_similarity(features_compress)
image_sample = Image.open("/content/gdrive/MyDrive/TER/Test/image2.jpg")
imageS = image_sample.crop()
thisImage = imageS.resize((224, 224))
my_image = image.img_to_array(thisImage)
my_x = np.expand_dims(my_image, axis=0)
my_x = preprocess_input(my_x)
my_features = model.predict(my_x)
my_features_compress = my_features.reshape(1, 7 * 7 * 2048)
new_features = np.append(features_compress, my_features_compress, axis=0)
# print(np.shape(new_features))
# exit(0)
sim = cosine_similarity(new_features)
# print("sim:", np.shape(sim))
top = np.argsort(-sim[-1, :], axis=0)[1:3]
recommend = [y_test[i] for i in top]
print(recommend)
# print(sim)
if __name__ == "__main__":
main()
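# Minimal sketch of cosine_similarity on toy vectors (comments only; not part of the script):
#   feats = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
#   cosine_similarity(feats) -> 3x3 matrix with sim[0, 1] == 0.0 and
#   sim[0, 2] == sim[1, 2] ≈ 0.707: rows are items, entries are cosines of the angles between them.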
|
ming19956/PFE
|
information-retrival-search-engine/informationRetrival/resnet50/resnet50.py
|
resnet50.py
|
py
| 3,180 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "urllib3.disable_warnings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.diagonal",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "numpy.shape",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "keras.applications.resnet50.preprocess_input",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "keras.applications.resnet50.ResNet50",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "numpy.expand_dims",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "keras.applications.resnet50.preprocess_input",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 97,
"usage_type": "call"
}
] |
33055278390
|
import psycopg2 as psy
import pandas as pd
# Class for connect and write Postgresql
class PsycopgPostgresWarehouse:
def __init__(self, host, database, user, pw, port):
self.host = host
self.database = database
self.user = user
self.pw = pw
self.port = port
# Connect Database
    def connect_database(self):
        try:
            conn = psy.connect(
                host=self.host,
                database=self.database,
                user=self.user,
                password=self.pw,
                port=self.port
            )
            print("Connect Success")
        except psy.Error as err:
            # re-raise: falling through to `return conn` after a failed connect would hit a NameError
            print("Failed to connect:", err)
            raise
        return conn
# Connect database query data to Dataframe
@staticmethod
def to_dataframe(conn, query) -> pd.DataFrame:
df = pd.read_sql(query, conn)
return df
# https://www.mindphp.com/developer/tips-python/7907-static-method-in-python.html#:~:text=Static
# %20method%20%E0%B8%84%E0%B8%B7%E0%B8%AD%20%E0%B8%81%E0%B8%B2%E0%B8%A3%E0%B8%97%E0%B8%B3%E0%B9%83%E0%B8%AB%E0%B9
# %89,%E0%B9%80%E0%B8%A3%E0%B8%B5%E0%B8%A2%E0%B8%81%E0%B9%80%E0%B8%A1%E0%B8%98%E0%B8%AD%E0%B8%94%E0%B9%81%E0%B8
# %95%E0%B9%88%E0%B8%A5%E0%B8%B0%E0%B8%84%E0%B8%A3%E0%B8%B1%E0%B9%89%E0%B8%87%20%E0%B9%80%E0%B8%8A%E0%B9%88%E0%B8
# %99
@staticmethod
def execute_mogrify_upsert(conn, dataframe, column_unique, table):
df_not_unique_key = dataframe.drop(columns=column_unique)
upsert_value = ["EXCLUDED." + s for s in df_not_unique_key.columns]
tpls = [tuple(x) for x in dataframe.to_numpy()]
cols = ','.join(list(dataframe.columns))
cols_2 = ','.join(list(df_not_unique_key.columns))
cols_3 = ','.join(upsert_value)
no_column = len(dataframe.columns)
add_no_column_to_insert = "%s," * no_column
        add = add_no_column_to_insert.rstrip(',')  # drop the trailing comma
cursor = conn.cursor()
values = [cursor.mogrify("(" + add + ")", tup).decode('utf8') for tup in tpls]
sql = "INSERT INTO %s(%s) VALUES " % (table, cols) + ",".join(
values) + " ON CONFLICT (" + column_unique + ") DO UPDATE SET (" + cols_2 + ")" + " = (" + cols_3 + ")"
# print(sql)
cursor.execute(sql)
print("Data upserted using execute_mogrify() successfully. : " + str(table) + "")
conn.commit()
cursor.close()
@staticmethod
def execute_mogrify_upsert_single_column(conn, dataframe, column_unique, table):
df_not_unique_key = dataframe.drop(columns=column_unique)
upsert_value = ["EXCLUDED." + s for s in df_not_unique_key.columns]
tpls = [tuple(x) for x in dataframe.to_numpy()]
cols = ','.join(list(dataframe.columns))
cols_2 = ','.join(list(df_not_unique_key.columns))
cols_3 = ','.join(upsert_value)
no_column = len(dataframe.columns)
add_no_column_to_insert = "%s," * no_column
        add = add_no_column_to_insert.rstrip(',')  # drop the trailing comma
cursor = conn.cursor()
values = [cursor.mogrify("(" + add + ")", tup).decode('utf8') for tup in tpls]
sql = "INSERT INTO %s(%s) VALUES " % (table, cols) + ",".join(
values) + " ON CONFLICT (" + column_unique + ") DO UPDATE SET " + cols_2 + "" + " = " + cols_3 + ""
# print(function)
cursor.execute(sql)
print("Data upserted using execute_mogrify() successfully. : " + str(table) + "")
conn.commit()
cursor.close()
@staticmethod
def execute_mogrify_insert(conn, dataframe, table):
# Creating a list of tupples from the dataframe values
tpls = [tuple(x) for x in dataframe.to_numpy()]
# dataframe columns with Comma-separated
cols = ','.join(list(dataframe.columns))
no_column = len(dataframe.columns)
add_no_column_to_insert = "%s," * no_column
        add = add_no_column_to_insert.rstrip(',')  # drop the trailing comma
# SQL query to execute
cursor = conn.cursor()
values = [cursor.mogrify("(" + add + ")", tup).decode('utf8') for tup in tpls]
sql = "INSERT INTO %s(%s) VALUES " % (table, cols) + ",".join(values)
# print(function)
# try:
cursor.execute(sql)
# print(cursor.execute(function))
conn.commit()
print("Data inserted using execute_mogrify() successfully.")
cursor.close()
# except (Exception, psy.DatabaseError):
# print(psy.DatabaseError)
# print("Error")
# cursor.close()
# pass
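# Minimal usage sketch (hypothetical credentials and table names, not part of the original):
#   wh = PsycopgPostgresWarehouse('localhost', 'mydb', 'etl_user', 'secret', 5432)
#   conn = wh.connect_database()
#   df = PsycopgPostgresWarehouse.to_dataframe(conn, 'SELECT * FROM sales LIMIT 10')
#   PsycopgPostgresWarehouse.execute_mogrify_upsert(conn, df, 'sale_id', 'sales_copy')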
|
Tana8M/data-engineer-assignment
|
pipeline/function/postgresql_function/psycopg2_postgresql.py
|
psycopg2_postgresql.py
|
py
| 4,631 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "psycopg2.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "attribute"
}
] |
21640069600
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from .models import VideoFile
class VideoFileForm(forms.ModelForm):
"""Form for user file uploading."""
def clean(self):
cleaned_data = super().clean()
original_file = cleaned_data.get('original_file')
url = cleaned_data.get('url')
if original_file and url:
raise ValidationError(
_('Only one field must be filled.')
)
elif not original_file and not url:
raise ValidationError(
_('Please enter data in one of these fields.')
)
class Meta:
model = VideoFile
fields = (
'original_file',
'url',
)
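# Minimal usage sketch (hypothetical values, not part of the original module):
#   form = VideoFileForm(data={'url': 'https://example.com/v.mp4'})
#   form.is_valid()  # True: exactly one of the two fields is filled
#   form = VideoFileForm(data={})
#   form.is_valid()  # False: clean() adds "Please enter data in one of these fields."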
|
sergeybe/video-archive
|
src/video/forms.py
|
forms.py
|
py
| 813 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.VideoFile",
"line_number": 27,
"usage_type": "name"
}
] |
35424778344
|
from art import higher_lower,vs
from game_data import data
import random
import os
#Display art
score = 0
game_continue = True
account_b = random.choice(data)
def format_data(account):
"""Takes the account data and return the printable format"""
account_name = account["name"]
account_desc = account["description"]
account_country = account["country"]
return f"{account_name}, a {account_desc}, from {account_country}"
def check_answer(guess, a_follower,b_follower):
""" Take the user guess and follower count an dreturn if they got it right."""
if a_follower > b_follower:
return guess == "a"
else:
return guess == "b"
print(higher_lower)
#make the game repeatable
while game_continue:
#Generate random data
#Making the accounts at position B become the next positon A
account_a = account_b
account_b = random.choice(data)
while account_a == account_b:
account_b = random.choice(data)
#Format the account data into printable format
print(f"Comapre A : {format_data(account_a)}")
print(vs)
print(f"Against B : {format_data(account_b)}")
#ask user guess
guess = input("Who has more followers? Type 'A' or 'B' : ").lower()
#Check if user is correct.
## Get follower counr of each account.
a_follower_account = account_a["follower_count"]
b_follower_account = account_b["follower_count"]
is_correct = check_answer(guess, a_follower_account, b_follower_account)
    #Give user feedback on their guess.
#score keeping
if is_correct:
score += 1
print(f"You are right! Current score : {score}")
else:
game_continue = False
print(f"Sorry, that's wrong! Current score : {score}")
    #clear the screen between rounds (keep the final score visible after a wrong guess)
    if game_continue:
        os.system('cls' if os.name == 'nt' else 'clear')
|
pav537/Python
|
Higher_Lower Game.py
|
Higher_Lower Game.py
|
py
| 1,900 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.choice",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "game_data.data",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "art.higher_lower",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "random.choice",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "game_data.data",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "game_data.data",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "art.vs",
"line_number": 42,
"usage_type": "argument"
}
] |
23007665672
|
#!/usr/bin/env python
#
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Top levels scripts to extract castro data from an all-sky analysis
"""
import os
import argparse
import numpy as np
import yaml
from astropy import table
from fermipy import fits_utils
from dmpipe import dmp_roi
from dmpipe.dm_target import DMTargetFactory
def read_targets(filepath):
""" Read a set of targets from a fits file """
return read_targets_from_fits(filepath)
def read_targets_from_fits(fitsfile):
""" Read a set of targets from a fits file """
tab = table.Table.read(fitsfile)
mask = np.zeros(len(tab), bool)
key_col = tab['key']
for i in range(len(tab)):
mask[i] = key_col[i].find('point') != -1
tab_mask = tab[mask]
coords = np.ndarray((len(tab_mask), 2))
coords[:, 0] = tab_mask['glon']
coords[:, 1] = tab_mask['glat']
out_dict = {'targets': tab_mask['target'],
'coordinates': coords}
return out_dict
def read_targets_from_yaml(yamlfile):
""" Read a set of targets from a yaml file """
    din = yaml.load(yamlfile, Loader=yaml.SafeLoader)  # modern PyYAML requires an explicit Loader
coords = np.ndarray((len(din), 2))
for i, (key, val) in enumerate(din.items()):
coords[i, 0] = val['l']
coords[i, 1] = val['b']
out_dict = {'targets': list(din.keys()),
'coordinates': coords}
return out_dict
def add_columns(out_table, in_table, col_names):
""" Add columnes to a table """
for col in col_names:
out_table.add_column(in_table[col])
def main():
""" Hook for command line access """
# Argument defintion
usage = "usage: %(prog)s [input]"
description = "Collect all the new source"
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument('--input', '-i', default='roi_set.yaml', help='ROI set definition file.')
parser.add_argument('--output', '-o', type=argparse.FileType('w'), help='Output file.')
parser.add_argument('--clobber', action='store_true', help='Overwrite output file.')
parser.add_argument('--targets', '-t', type=str, help='Target file.')
parser.add_argument('--filestr', '-f', default="tscube.fits",
help="Name of file within each ROI sub-directory")
# Argument parsing
args = parser.parse_args()
# Read the target file
targ_type = os.path.splitext(args.targets)[1]
print(targ_type)
if targ_type in ['.fit', '.fits']:
targets = DMTargetFactory.read_table(args.targets)
roster = None
else:
targets, roster = DMTargetFactory.make_table([args.targets])
# Get the sky_crds
sky_crds = DMTargetFactory.get_sky_crds(targets)
# Make the roi_set object
roi_set, basedir = dmp_roi.DMROISet.create_from_yaml(args.input)
# extract the data
out_table = roi_set.extract_table_data(sky_crds, args.filestr,
basedir=basedir, tables=["SCANDATA", "FITDATA"])
# add_names_column(out_table,targets['name'])
col_names = ['name', 'ra', 'dec', 'distance', 'proftype', 'glat', 'glon', 'j_integ', 'd_integ']
add_columns(out_table, targets, col_names)
ebounds_table = roi_set.extract_single_table(args.filestr, basedir=basedir, table="EBOUNDS")
# Write the output
fits_utils.write_tables_to_fits(args.output, [out_table, ebounds_table],
clobber=args.clobber, namelist=["SCANDATA", "EBOUNDS"])
if __name__ == '__main__':
main()
|
fermiPy/dmpipe
|
dmpipe/scripts/extract_castro_data.py
|
extract_castro_data.py
|
py
| 3,510 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "astropy.table.Table.read",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "astropy.table",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "dmpipe.dm_target.DMTargetFactory.read_table",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "dmpipe.dm_target.DMTargetFactory",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "dmpipe.dm_target.DMTargetFactory.make_table",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "dmpipe.dm_target.DMTargetFactory",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "dmpipe.dm_target.DMTargetFactory.get_sky_crds",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "dmpipe.dm_target.DMTargetFactory",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "dmpipe.dmp_roi.DMROISet.create_from_yaml",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "dmpipe.dmp_roi.DMROISet",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "dmpipe.dmp_roi",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "fermipy.fits_utils.write_tables_to_fits",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "fermipy.fits_utils",
"line_number": 109,
"usage_type": "name"
}
] |
13305672302
|
import cv2
#image read
"""
img = cv2.imread("Resources/lena.png")
cv2.imshow("Output",img)
cv2.waitKey(0)
"""
#video read
"""
cap = cv2.VideoCapture("Resources/test_video.mp4")
while True:
success, img = cap.read()
cv2.imshow("Video",img)
if cv2.waitKey(1) & 0xFF ==ord('q'):
break
"""
#webcam
cap = cv2.VideoCapture(2)  # open the camera at device index 2
cap.set(3,640)   # property id 3: frame width
cap.set(4,480)   # property id 4: frame height
cap.set(10,100)  # property id 10: brightness
while True:
success, img = cap.read()
cv2.imshow("Video",img)
if cv2.waitKey(1) & 0xFF ==ord('q'):
break
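# Hedged note (not part of the original file): the numeric property ids above
# correspond to named OpenCV constants, e.g.
#   cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
#   cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
#   cap.set(cv2.CAP_PROP_BRIGHTNESS, 100)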
|
Umang-Seth/General_OpenCV_Fn
|
Load_Webcam.py
|
Load_Webcam.py
|
py
| 543 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 29,
"usage_type": "call"
}
] |
15309466710
|
import asyncio
from misc import dp,bot
from .sqlit import reg_user
from aiogram import types
channel1 = -1001804437355
content_id = -1001165606914
print(1)
markup = types.InlineKeyboardMarkup()
bat_a = types.InlineKeyboardButton(text='Access to group 🔑', url = 'https://t.me/share/url?url=https%3A%2F%2Ft.me%2F%2BH4vDT3QPa381ODUy')
markup.add(bat_a)
async def posting():
while True:
q = await bot.copy_message(chat_id=channel1,from_chat_id=content_id,message_id=10,reply_markup=markup)
await asyncio.sleep(45)
await bot.delete_message(chat_id=channel1,message_id=q.message_id)
@dp.chat_join_request_handler()
async def join(update: types.ChatJoinRequest):
reg_user(update.from_user.id,1)
await bot.copy_message(chat_id=update.from_user.id, from_chat_id=content_id, message_id=16, reply_markup=markup)
try:
await update.approve()
except:
pass
@dp.message_handler(commands=['start'])
async def start_bot(message: types.Message):
reg_user(message.chat.id,ref=1)
print(2)
await bot.copy_message(chat_id=message.chat.id, from_chat_id=content_id, message_id=16, reply_markup=markup)
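# Hedged sketch (not part of the original file): posting() is defined above but
# never scheduled in this module; with aiogram 2.x it could be started roughly as
#   from aiogram.utils import executor
#   async def on_startup(dp):
#       asyncio.create_task(posting())
#   executor.start_polling(dp, on_startup=on_startup)
# The real entry point depends on how misc.dp / misc.bot are wired elsewhere.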
|
pytera895143242/spec2rep
|
handlers/commands_start.py
|
commands_start.py
|
py
| 1,164 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "aiogram.types.InlineKeyboardMarkup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "aiogram.types",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "aiogram.types.InlineKeyboardButton",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "aiogram.types",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "misc.bot.copy_message",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "misc.bot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "misc.bot.delete_message",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "misc.bot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "aiogram.types.ChatJoinRequest",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sqlit.reg_user",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "misc.bot.copy_message",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "misc.bot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "misc.dp.chat_join_request_handler",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "misc.dp",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "aiogram.types.Message",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "sqlit.reg_user",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "misc.bot.copy_message",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "misc.bot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "misc.dp.message_handler",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "misc.dp",
"line_number": 37,
"usage_type": "name"
}
] |
18464161519
|
# Libraries
import pandas as pd
import re, sqlite3
# Reading the data
data = pd.read_csv('Digimon_cards.csv')
# Connecting with the database
con = sqlite3.connect('Digimon_Cards.sqlite')
cur = con.cursor()
# Inserting the data
## Card type's table
insert_card_type = 'INSERT INTO Card_types(name) VALUES'
insert_card_type += '('
for card_type in data['Card_type'].unique():
insert_card_type += '"'+str(card_type) + '"'+'),('
insert_card_type = insert_card_type[:-2] +';'
cur.execute(insert_card_type)
con.commit()
## Color's table
insert_color = 'INSERT INTO Colors(name) VALUES'
insert_color += '('
for color in data['Color'].unique():
insert_color += '"'+str(color) + '"'+'),('
insert_color = insert_color[:-2] +';'
cur.execute(insert_color)
con.commit()
## Form's table
insert_form = 'INSERT INTO Forms(name) VALUES'
insert_form += '('
for form in data['Form'].unique():
insert_form += '"'+str(form) + '"'+'),('
insert_form = insert_form[:-2] +';'
insert_form = re.sub('\("-"\),', '', insert_form)
cur.execute(insert_form)
con.commit()
## Atribute's table
insert_attribute = 'INSERT INTO Attributes(name) VALUES'
insert_attribute += '('
for attribute in data['Attribute'].unique():
insert_attribute += '"'+str(attribute) + '"'+'),('
insert_attribute = insert_attribute[:-2] +';'
insert_attribute = re.sub('\("-"\),', '', insert_attribute)
cur.execute(insert_attribute)
con.commit()
## Digimon type's table
insert_digimon_type = 'INSERT INTO Digimon_types(name) VALUES'
insert_digimon_type += '('
for digimon_type in data['Digimon_type'].unique():
insert_digimon_type += '"'+str(digimon_type) + '"'+'),('
insert_digimon_type = insert_digimon_type[:-2] +';'
cur.execute(insert_digimon_type)
con.commit()
## Deck type's table
insert_deck_type = 'INSERT INTO Deck_types(name) VALUES'
insert_deck_type += '('
for deck_type in data['Deck_type'].unique():
insert_deck_type += '"'+str(deck_type) + '"'+'),('
insert_deck_type = insert_deck_type[:-2] +';'
cur.execute(insert_deck_type)
con.commit()
## Effect's table
### Combining all the effects features into one dataframe
Effects = list(data['Effect'])
Effects.extend(data['Digivolve_effect'])
Effects.extend(data['Security_effect'])
Effects = pd.DataFrame(Effects)[0].unique()
### Inserting the values
insert_effect = "INSERT INTO Effects(name) VALUES"
insert_effect = insert_effect + '('
for effect in Effects:
insert_effect = insert_effect + '"' + str(effect) + '"' + '),('
insert_effect = insert_effect[:-2]+ ';'
cur.execute(insert_effect)
con.commit()
## Digimon's data
colum_names = ['Card_type', 'Color', 'Form', 'Attribute','Digimon_type',
'Effect', 'Digivolve_effect', 'Security_effect', 'Deck_type']
insert_digimon = '''INSERT INTO Digimons(code, name, level, card_type_id, color_id, form_id, attribute_id,
digimon_type_id, DP, Play_cost, Digivolve_cost_1,
Digivolve_level_1, Digivolve_cost_2, Digivolve_level_2,
effect_id, digivolve_effect_id, security_effect_id, deck_type_id, 'Deck_name','Image_link') VALUES'''
for row in range(0,len(data)):
i = 0
insert_digimon += '('
for feature in data.iloc[row]:
title = data.columns.values[i]
if title in ['Effect', 'Digivolve_effect', 'Security_effect']:
title = 'Effect'
if title in colum_names:
select_query = 'SELECT id FROM '
select_query += str(title) + 's' + ' WHERE name = ? ;'
cur.execute(select_query, (feature, ))
try:
feature_id = cur.fetchone()[0]
insert_digimon += '"'+str(feature_id) + '"'+','
except:
insert_digimon += 'NULL,'
elif pd.isna(feature):
insert_digimon += 'NULL,'
else:
insert_digimon += '"' + str(feature) + '"'+','
i = i+1
insert_digimon = insert_digimon[:-1] +'),'
insert_digimon = insert_digimon[:-2] +');'
cur.execute(insert_digimon)
con.commit()
# Disconnecting from the database
con.close()
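# Hedged note (not part of the original file): the string-built INSERT statements
# above are fragile around quoting; sqlite3 also supports parameterized batches, e.g.
#   cur.executemany('INSERT INTO Colors(name) VALUES (?);',
#                   [(str(c),) for c in data['Color'].unique()])
# which would replace the manual '"..."),(' concatenation used here.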
|
davidr9708/Digimon_Card_Game
|
Code/3_Data_insertion.py
|
3_Data_insertion.py
|
py
| 4,306 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pandas.isna",
"line_number": 129,
"usage_type": "call"
}
] |
29575131193
|
import json
from argo_ams_library import AmsException, AmsMessage, ArgoMessagingService
class PullPublish:
def __init__(self, config):
self.pull_sub = config["pull_sub"]
self.pub_topic = config["pub_topic"]
self.pull_topic = config["pull_topic"]
self.ams = ArgoMessagingService(endpoint=config["host"], token=config["token"], project=config["project"])
def pull(self, nummsgs):
messages = []
try:
if not self.ams.has_sub(self.pull_sub):
self.ams.create_sub(self.pull_sub, self.pull_topic)
except AmsException as e:
print(e)
raise SystemExit(1)
# try to pull number of messages from subscription. method will
# return (ackIds, AmsMessage) tuples from which ackIds and messages
# payload will be extracted.
ackids = list()
for id, msg in self.ams.pull_sub(self.pull_sub, nummsgs):
data = msg.get_data()
# msgid = msg.get_msgid()
# attr = msg.get_attr()
messages.append(json.loads(data.decode("utf-8")))
# print('msgid={0}, data={1}, attr={2}'.format(msgid, data, attr))
ackids.append(id)
return messages, ackids
def ack(self, ackids):
# pass list of extracted ackIds to AMS Service so that
# it can move the offset for the next subscription pull
# (basically acknowledging pulled messages)
if ackids:
self.ams.ack_sub(self.pull_sub, ackids)
def publish(self, messages):
# messages = [{data:[{id:1},{state:'deployed'}],attributes=''}]
try:
if not self.ams.has_topic(self.pub_topic):
self.ams.create_topic(self.pub_topic)
except AmsException as e:
print(e)
raise SystemExit(1)
# publish one message to given topic. message is constructed with
# help of AmsMessage which accepts data and attributes keys.
# data is Base64 encoded, attributes is dictionary of arbitrary
# key/value pairs
msg = AmsMessage()
msglist = []
for message in messages:
msglist.append(msg(data=json.dumps(message["data"]), attributes={}))
try:
ret = self.ams.publish(self.pub_topic, msglist)
print(ret)
except AmsException as e:
print(e)
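# Hedged usage sketch (not part of the original file): the class above expects a
# config dict with host/token/project plus topic and subscription names, e.g.
#   config = {"host": "ams.example.org", "token": "<token>", "project": "myproject",
#             "pull_topic": "in", "pull_sub": "in-sub", "pub_topic": "out"}
#   pp = PullPublish(config)
#   msgs, ackids = pp.pull(10)
#   pp.publish([{"data": m} for m in msgs])
#   pp.ack(ackids)
# All names here are placeholders; only the keys match the constructor above.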
|
rciam/rciam-federation-registry-agent
|
ServiceRegistryAms/PullPublish.py
|
PullPublish.py
|
py
| 2,400 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "argo_ams_library.ArgoMessagingService",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argo_ams_library.AmsException",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "argo_ams_library.AmsException",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "argo_ams_library.AmsMessage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "argo_ams_library.AmsException",
"line_number": 63,
"usage_type": "name"
}
] |
15768547417
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Type
from django.db.models import JSONField
from django.db.models.lookups import Lookup
from pymilvus.client.types import DataType
from .lookups import get_nearest_n
if TYPE_CHECKING:
from django_milvus.connection import Connection
class MilvusField(JSONField):
def __init__(
self,
dim: int,
dtype: DataType,
*args: Any,
dbname: str = "default",
nlist: int = 1024,
nprobe: int = 32,
metric_type: str = "L2",
index_type: str = "IVF_FLAT",
**kwargs: Any,
) -> None:
self.dim = dim
self.dtype = dtype
self.dbname = dbname
self.nlist = nlist
self.nprobe = nprobe
self.metric_type = metric_type
self.index_type = index_type
super().__init__(*args, **kwargs)
def get_connection_class(self) -> Type["Connection"]:
from .connection import Connection
return Connection
def get_connection(self) -> Connection:
return self.get_connection_class()(self.dbname)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs.update(
{
"dim": self.dim,
"dtype": self.dtype,
"dbname": self.dbname,
"nlist": self.nlist,
"nprobe": self.nprobe,
"metric_type": self.metric_type,
"index_type": self.index_type,
}
)
return name, path, args, kwargs
def get_lookup(self, lookup_name: str) -> Type[Lookup] | None:
if lookup_name.startswith("nearest"):
try:
return get_nearest_n(
int(lookup_name[8:]),
self.model,
self,
self.get_connection(),
)
except ValueError:
                raise ValueError(
                    f"incorrect syntax when looking up nearby vectors: "
                    f"use nearest_{{int}}, got {lookup_name}"
                )
else:
raise ValueError("Not supported lookup: " + lookup_name)
|
kaleido-public/django-milvus
|
django_milvus/fields.py
|
fields.py
|
py
| 2,190 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.JSONField",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pymilvus.client.types.DataType",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "connection.Connection",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "connection.Connection",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "lookups.get_nearest_n",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "typing.Type",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.db.models.lookups.Lookup",
"line_number": 60,
"usage_type": "name"
}
] |
31574941802
|
from PIL import Image
import sys
import argparse
from os.path import exists
import time
import math
import re
import tkinter as tk
from tkinter import filedialog, ttk
import threading
import population
import province
import vicmap
mod_dir_loc = ""
save_file_loc = ""
map_type = ""
global_population = 0
mean_savings = 0
sd_savings = 0
all_pops = []
test_map = None
progress = None
gui_mode = True
verbose = False
out_file_loc = "map_out.png"
game_dir = ""
def vprint(s):
if verbose:
print(s)
if gui_mode:
pass # Do something to the progress bar idk
# Loads a game file from the mod directory, or if missing, from the game directory.
def get_game_file_loc(location):
if exists(mod_dir_loc + location):
return mod_dir_loc + location
elif exists(game_dir + location):
return game_dir + location
else:
sys.exit("File not found " + location)
def split_dec(line):
sides = line.split("=")
return (sides[0].strip(), sides[1].strip())
def open_save(location):
return open(location, "r", encoding='iso-8859-1')
def read_save(save_file):
i = 0
current_prov = None
for line in save_file:
i = i + 1
if bool(re.search("^\d+=$", line)):
current_prov = int(split_dec(line)[0])
save_file.__next__()
if not bool(re.search("\tname", save_file.__next__())):
continue
save_file.__next__()
line = save_file.__next__()
if re.search("^}$", line.strip()):
province.id_dict[current_prov].is_water = True
elif bool(re.search(population.pop_regex, line.strip())):
population.POP(save_file, current_prov, split_dec(line)[0])
elif bool(re.search("battle=", line)):
province.make_battle(save_file)
vprint(f"Lines parsed: {i}")
def load_UI():
global progress
window = tk.Tk()
window.title("Victoria 2 Mapper")
save_file_entry = tk.Entry(width=100)
mod_dir = tk.Entry(width=100)
def set_mod_dir():
global mod_dir_loc
mod_dir_loc = tk.filedialog.askdirectory()
mod_dir.insert(0, mod_dir_loc)
ld_mod = tk.Button(text="Choose Mod", command=set_mod_dir)
def set_save_file():
global save_file_loc
save_file_loc = tk.filedialog.askopenfilename()
save_file_entry.insert(0, save_file_loc)
ld_save = tk.Button(text="Choose Save", command=set_save_file)
map_type_entry = tk.Entry(width = 100)
def make_map_ui():
make_map(map_type_entry.get())
    make_button = tk.Button(text="Make Map", command=lambda: threading.Thread(target=make_map_ui).start())
progress = tk.ttk.Progressbar()
tk.Label(text="Save File:").grid(row = 0, column = 0, padx=3, pady=3)
save_file_entry.grid(row = 0, column = 1, padx=3, pady=3)
ld_save.grid(row = 0, column = 2, padx=3, pady=3)
tk.Label(text="Mod Directory:").grid(row = 1, column = 0, padx=3, pady=3)
mod_dir.grid(row = 1, column = 1, padx=3, pady=3)
ld_mod.grid(row = 1, column = 2, padx=3, pady=3)
tk.Label(text="Parameters:").grid(row = 2, column = 0, padx=3, pady=3)
map_type_entry.grid(row = 2, column = 1, padx=3, pady=3)
make_button.grid(row = 3, column = 1, padx=3, pady=3)
progress.grid(row = 4, column = 1, padx=3, pady=3)
window.mainloop()
# Map Function #
def draw_map(map_func):
global test_map
# Some poorly made maps have invalid colors, this uses the previous color as a backup.
prev_color = None # Previous color used on the province map
prev_draw = None
for x in range(vicmap.MAP_W):
for y in range(vicmap.MAP_H):
this_color = vicmap.pixel_map[x, y]
if (this_color == (0, 0, 0)):
this_color = prev_color
this_prov = province.color_dict[this_color]
test_map[x, vicmap.MAP_H - y - 1] = map_func(this_prov, x, y)
prev_color = this_color
def pop_attr_map(attr):
attr_dict = {
"religion" : population.religions,
"culture" : population.cultures,
"kind" : population.pop_types
}
attr_list = attr_dict[attr]
def out_func(this_prov, x, y):
out_colors = ((0, 0, 0),)
if this_prov.is_water:
return (255, 255, 255)
rel_tuple = this_prov.most_populous(attr)
out_colors = (attr_list[rel_tuple[0]], attr_list[rel_tuple[-1]])
if len(out_colors) > 1 and (x + y) % 5 == 0:
return out_colors[1]
else:
return out_colors[0]
return out_func
def pop_attr_heatmap(attr, kind):
most = province.get_most(attr, kind)
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
return (int(255 * (this_prov.get_amnt(attr, kind)/most)), 0, 0)
return out_func
def pop_attr_percent_map(attr, kind):
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
return (int(255 * (this_prov.get_amnt(attr, kind)/this_prov.total_pop)), 0, 0)
return out_func
def pop_average_savings():
most = 0
for prov in province.provinces:
if not prov.is_water and prov.total_pop > 0:
prov.avg_savings = sum([pop.money for pop in prov.POPs]) / prov.total_pop
most = prov.avg_savings if prov.avg_savings > most else most
print(most)
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
return (int(255 * (this_prov.avg_savings/most)), 0, 0)
return out_func
def pop_magnitude_savings():
most = 0
for prov in province.provinces:
if not prov.is_water and prov.total_pop > 0:
prov.mag_savings = ((sum([pop.money for pop in prov.POPs]) / prov.total_pop) - mean_savings) / sd_savings
most = prov.mag_savings if prov.mag_savings > most else most
print(most)
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
if this_prov.mag_savings < 0:
return (255, 0, 0)
return (0, 255, 0)
return out_func
def pop_total_savings():
most = 0
for prov in province.provinces:
if not prov.is_water and prov.total_pop > 0:
prov.total_savings = sum([pop.money for pop in prov.POPs])
most = prov.total_savings if prov.total_savings > most else most
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
if this_prov.total_pop == 0:
return (0, 0, 0)
return (int(255 * (this_prov.total_savings/most)), 0, 0)
return out_func
def population_heatmap():
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
return (int(255*(this_prov.total_pop/province.largest_prov_pop)), 0, 0)
return out_func
def battle_death_map():
most = max(province.provinces, key=lambda prov: prov.battle_deaths).battle_deaths
def out_func(this_prov, x, y):
if this_prov.is_water:
return (255, 255, 255)
return (int(255*(this_prov.battle_deaths/most)), 0, 0)
return out_func
def make_map(params):
global global_population, mean_savings, sd_savings, all_pops, test_map, progress, save_file_loc, mod_dir_loc
# Intertpret what kind of map the user wants.
map_types = {
"population" : (population_heatmap, 0),
"total_savings" : (pop_total_savings, 0),
"average_savings" : (pop_average_savings, 0),
"magnitude_savings" : (pop_magnitude_savings, 0),
"attr_percent" : (pop_attr_percent_map, 2),
"attr_heatmap" : (pop_attr_heatmap, 2),
"attr" : (pop_attr_map, 1),
"battle_deaths" : (battle_death_map, 0)
}
params = params.split(' ')
map_type_func = map_types[params[0]][0]
map_type_param_amnt = map_types[params[0]][1]
map_type_func_params = None
if map_type_param_amnt == 0:
map_type_func_params = ()
else:
map_type_func_params = tuple(params[1:1+map_type_param_amnt])
population.make_pop_regex()
vprint("Loading Files...")
vicmap.load_map(get_game_file_loc("/map/provinces.bmp"))
province.load_provinces(get_game_file_loc("/map/definition.csv"))
population.load_culture(get_game_file_loc("/common/cultures.txt"))
vprint("Reading Save...")
save_file = open_save(save_file_loc)
read_save(save_file)
vprint("Doing Stats...")
for prov in province.provinces:
prov.get_population()
for prov in province.provinces:
all_pops += prov.POPs
global_population = sum([prov.total_pop for prov in province.provinces])
mean_savings += sum([pop.money for pop in all_pops]) / global_population
sd_savings = math.sqrt(sum([((pop.money / pop.size - mean_savings)**2) * pop.size for pop in all_pops]) / global_population)
img = Image.new('RGB', (vicmap.MAP_W, vicmap.MAP_H), "BLACK")
test_map = img.load()
vprint("Drawing Map...")
draw_map(map_type_func(*map_type_func_params))
img.save(out_file_loc)
#img.show()
def print_license():
license = """
MIT License
Copyright (c) 2020 neopythagorean
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
print (license)
def command_line():
    global mod_dir_loc, save_file_loc, out_file_loc, game_dir, verbose
parser = argparse.ArgumentParser(description='Mapping tool for Victoria 2.')
parser.add_argument('desc', type=str, nargs='?', help='map description string')
parser.add_argument('-o', type=str, nargs='?', default='map_out.png', help='out file')
parser.add_argument('-s', type=str, nargs=1, help='save file')
parser.add_argument('-m', type=str, nargs=1, help='mod directory')
parser.add_argument('-g', type=str, nargs=1, help='game directory')
parser.add_argument('--verbose', action='store_true', help='print debug info')
parser.add_argument('--gui', action='store_true', help='force GUI')
parser.add_argument('--license', action='store_true', help='show license information')
p_args = parser.parse_args(sys.argv[1:])
verbose = p_args.verbose
vprint("--VICTORIA 2 MAPPER--")
if p_args.gui:
# Force GUI
load_UI()
return
if p_args.license:
print_license()
return
mod_dir_loc = p_args.m[0]
game_dir = p_args.g[0]
save_file_loc = p_args.s[0]
out_file_loc = p_args.o
start = time.perf_counter()
make_map(p_args.desc)
elapsed = time.perf_counter() - start
vprint(f"Done in {elapsed:.3f}s")
def main():
if len(sys.argv) == 1:
# No Arguments -- load GUI
load_UI()
else:
command_line()
main()
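# Hedged usage sketch (not part of the original file): from the argparse setup in
# command_line(), a CLI run might look like
#   python mapper.py "attr culture" -s autosave.v2 -m ./mod -g ./vic2 -o out.png --verbose
# while running with no arguments opens the Tk GUI. File names here are placeholders.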
|
neopythagorean/vic2mapper
|
src/mapper.py
|
mapper.py
|
py
| 12,604 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "province.id_dict",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "population.pop_regex",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "population.POP",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "province.make_battle",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Progressbar",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Label",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "vicmap.MAP_W",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "vicmap.MAP_H",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "vicmap.pixel_map",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "province.color_dict",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "vicmap.MAP_H",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "population.religions",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "population.cultures",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "population.pop_types",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "province.get_most",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "province.provinces",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "province.provinces",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "province.provinces",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "province.largest_prov_pop",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "province.provinces",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "population.make_pop_regex",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "vicmap.load_map",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "province.load_provinces",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "population.load_culture",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "province.provinces",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "province.provinces",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "province.provinces",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "vicmap.MAP_W",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "vicmap.MAP_H",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "time.perf_counter",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 399,
"usage_type": "attribute"
}
] |
39940042757
|
import numpy as np
import matplotlib.pyplot as plt
import os.path
import Style
import sys
zsims = ['3.61', '4.038','5.017']
simnom = ['SAGE']
cm = plt.get_cmap('tab10') # Colour map to draw colours from
path2sim = 'C:/Users/Olivia/TFG-TUT/'
for iiz, zsim in enumerate(zsims):
for sim in simnom:
ffav = path2sim + 'Datos_simulaciones/Medias_' + zsim + '.csv'
ffcorte = path2sim + 'Datos_simulaciones/Mass_SFR_corte_z_' + zsim + '.csv'
ff10 = path2sim + 'Datos_simulaciones/Mass_SFR_10_z_' + zsim + '.csv'
if not os.path.isfile(ffav):
continue
if not os.path.isfile(ffcorte):
continue
if not os.path.isfile(ff10):
continue
        '''Define the colours'''
cols = []
col = cm(1. * iiz / len(zsims))
cols.append(col)
#print(cols,col)
        '''MEANS'''
ghist = np.loadtxt(ffav, skiprows=1, usecols=(0), unpack=True, delimiter=',')
avSFR = np.loadtxt(ffav, skiprows=1, usecols=(1), unpack=True, delimiter=',') #Msun h^-1 yr^-1
ErrorMass = np.loadtxt(ffav, skiprows=1, usecols=(2), unpack=True, delimiter=',')
        '''CUT'''
MassCorte = np.loadtxt(ffcorte, skiprows=1, usecols=(1), unpack=True, delimiter=',')
StarFRCorte = np.loadtxt(ffcorte, skiprows=1, usecols=(2), unpack=True, delimiter=',')
SFRCorte = StarFRCorte - 9 #Msun h^-1 yr^-1
        '''10 PERCENT'''
Mass10 = np.loadtxt(ff10, skiprows=1, usecols=(1), unpack=True, delimiter=',')
StarFR10 = np.loadtxt(ff10, skiprows=1, usecols=(2), unpack=True, delimiter=',')
SFR10 = StarFR10 - 9 # Msun h^-1 yr^-1
indav = np.where(avSFR>0)
#indcorte = np.where(StarFR>0)
plt.style.use(Style.style1)
plt.plot(Mass10, SFR10, marker='.', color = 'steelblue', linewidth=0, label='10$\%$ SAGE z = ' + zsims[iiz] + '')
plt.plot(MassCorte, SFRCorte, marker='*', color = 'r', linewidth=0, label='corte SAGE z = ' + zsims[iiz] + '')
plt.plot(ghist[indav], avSFR[indav], marker='^', linewidth=0, color='k', label='SAGE z = ' + zsims[iiz] + '')
plt.errorbar(ghist[indav], avSFR[indav], yerr=ErrorMass[indav], xerr=None, fmt='.k')
plt.ylabel('log$_{10} \;$ (SFR $[M_{\odot} \; h^{-1}\; yr^{-1}$])')
plt.xlabel('log$_{10} \;$(M$ \; [M_{\odot} \; h^{-1} $])')
        #plt.title('Mean SAGE SFR versus galaxy mass bins')
#plt.xlim(8.4, 11.6)
plt.ylim(-2.5,3)
plotnom = path2sim + 'Figuras/Definitivas/Medias_corte_10_z_' + zsims[iiz] + '.png'
plt.legend()
plt.savefig(plotnom)
plt.show()
|
Ovive57/TFG-TUT
|
Dibujo_Medias_corte_10.py
|
Dibujo_Medias_corte_10.py
|
py
| 2,682 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.path.isfile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.path.isfile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.path.isfile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "Style.style1",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
}
] |
12731720405
|
#!/usr/bin/python3.8
"""
This example implements the interaction between Qt Widgets and a 2D
matplotlib plot showing a gaussian curve with scipy.
This app displays the graph inside the GUI.
"""
import sys
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT
from PySide6.QtCore import Qt, Slot
from PySide6.QtWidgets import (QApplication, QWidget, QDoubleSpinBox, QVBoxLayout, QHBoxLayout)
class PlotWidget(QWidget):
def __init__(self,parent=None):
super().__init__(parent)
#create widgets
self.view = FigureCanvas(Figure(figsize=(5,3)))
self.axes = self.view.figure.subplots()
self.toolbar = NavigationToolbar2QT(self.view,self)
self.avg_input = QDoubleSpinBox()
self.std_input = QDoubleSpinBox()
self.avg_input.setPrefix("μ: ")
self.std_input.setPrefix("σ: ")
self.std_input.setValue(10)
#create layout
input_layout = QHBoxLayout() #widgets are aligned horiz
input_layout.addWidget(self.avg_input)
input_layout.addWidget(self.std_input)
vlayout = QVBoxLayout()
vlayout.addWidget(self.toolbar)
vlayout.addWidget(self.view)
        vlayout.addLayout(input_layout)  # nest the spinbox row inside the vertical layout
self.setLayout(vlayout)
#connect input with a func
self.avg_input.valueChanged.connect(self.on_change)
self.std_input.valueChanged.connect(self.on_change)
#Exec on_change func
self.on_change()
@Slot() #connect to this func
def on_change(self):
# Update plot with input values
avg = self.avg_input.value() #get data from spinbox
std = self.std_input.value()
dx = np.linspace(-100,100)
        dy = norm.pdf(dx, avg, std)
self.axes.clear()
self.axes.plot(dx,dy)
self.view.draw()
if __name__ == "__main__":
app = QApplication(sys.argv)
wPlot = PlotWidget()
wPlot.show()
sys.exit(app.exec())
|
ndlopez/learn_python
|
learn_qt/qt_graph.py
|
qt_graph.py
|
py
| 2,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PySide6.QtWidgets.QWidget",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvas",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.figure.Figure",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QDoubleSpinBox",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QDoubleSpinBox",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QHBoxLayout",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QVBoxLayout",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm.pdf",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.Slot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 62,
"usage_type": "call"
}
] |
70504080829
|
from naman.core.models import VLan
from django.core.exceptions import ImproperlyConfigured
def assign_provisioning_vlan(machine):
print("Entering assign_provisioning_vlan")
prov_vlans = VLan.objects.filter(provisioning_purpose=True)
if prov_vlans.count() == 0:
raise ImproperlyConfigured("Missing provisioning vlans")
for vlan in prov_vlans:
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
raise VLan.NoFreeIPError("No free IPs at any provisioning vlan")
def assign_backup_vlan(machine):
#logging.basicConfig(level=logging.DEBUG)
print("Entering assign_backup_vlan")
for vlan in machine.environment.backup_vlans.all().order_by('name'):
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
raise VLan.NoFreeIPError("No free IPs at any backup vlan")
def assign_management_vlan(machine):
print("Entering management vlan")
man_vlans = VLan.objects.filter(management_purpose=True).order_by("name")
if man_vlans.count() == 0:
raise ImproperlyConfigured("Missing management vlans")
for vlan in man_vlans:
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
raise VLan.NoFreeIPError("No free IPs at any management vlan")
def assign_dmz_based_on_project(machine):
print("Entering dmz based on project vlan")
#if machine.dmz_located:
    project = machine.project  # the machine itself is not the project
    if project is None or project.dmz is None:
        raise ImproperlyConfigured(
            "DMZ located machine must belong to a project which has a dmz vlan assigned")
    machine.get_vlanconfig().append_vlan(project.dmz)
def assign_service_vlan_based_on_project(machine):
print("Entering service vlan based on project")
project = machine.project
for vlan in project.service_vlans.all().order_by('name'):
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
def assign_general_purpose_service_vlan(machine):
print("General purpose service vlan")
for vlan in machine.environment.service_vlans.all().order_by('-name'):
#print "trying service vlan with: %s" % vlan
try:
machine.get_vlanconfig().append_vlan(vlan)
return
except VLan.NoFreeIPError:
continue
#raise VLan.NoFreeIPError("Can't assign free IP for service vlan")
mappings = [
assign_backup_vlan,
assign_management_vlan,
assign_provisioning_vlan,
assign_dmz_based_on_project,
assign_service_vlan_based_on_project,
assign_general_purpose_service_vlan,
]
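# Hedged usage sketch (not part of the original file): the mappings list suggests
# each action is tried in order against a machine, e.g.
#   for action in mappings:
#       try:
#           action(machine)
#       except VLan.NoFreeIPError:
#           continue
# How the caller actually dispatches these is defined elsewhere in the project.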
|
jpardobl/naman
|
naman/core/mappings/vlan_actions.py
|
vlan_actions.py
|
py
| 2,859 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "naman.core.models.VLan.objects.filter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ImproperlyConfigured",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "naman.core.models.VLan.objects.filter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ImproperlyConfigured",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.ImproperlyConfigured",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "naman.core.models.VLan.NoFreeIPError",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "naman.core.models.VLan",
"line_number": 77,
"usage_type": "name"
}
] |
16257468366
|
import re
import pytest
from morphocut import Pipeline
from morphocut.file import Find, Glob
@pytest.mark.parametrize("sort", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
def test_Find(data_path, sort, verbose, capsys):
d = data_path / "images"
with Pipeline() as pipeline:
filename = Find(d, [".png"], sort, verbose)
stream = pipeline.transform_stream()
filenames = [o[filename] for o in stream]
if sort:
assert filenames == sorted(filenames)
if verbose:
out = capsys.readouterr().out
assert re.search(r"^Found \d+ files in .+\.$", out)
def test_Glob(data_path):
d = data_path / "images/*.png"
with Pipeline() as pipeline:
result = Glob(d, True)
pipeline.run()
|
morphocut/morphocut
|
tests/test_file.py
|
test_file.py
|
py
| 770 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "morphocut.Pipeline",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "morphocut.file.Find",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "morphocut.Pipeline",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "morphocut.file.Glob",
"line_number": 31,
"usage_type": "call"
}
] |
19886899760
|
import os
import pytest
from pendulum import datetime
from pathlib import Path
from hypothesis_trio.stateful import (
initialize,
rule,
run_state_machine_as_test,
TrioAsyncioRuleBasedStateMachine,
)
from hypothesis import strategies as st
from guardata.client.types import EntryID, LocalFileManifest, Chunk
from guardata.client.fs.storage import WorkspaceStorage
from guardata.client.fs.workspacefs.file_transactions import FSInvalidFileDescriptor
from guardata.client.fs.exceptions import FSRemoteBlockNotFound
from tests.common import freeze_time, call_with_control
class File:
def __init__(self, local_storage, manifest):
self.fresh_manifest = manifest
self.entry_id = manifest.id
self.local_storage = local_storage
def ensure_manifest(self, **kwargs):
manifest = self.local_storage.manifest_storage._cache[self.entry_id]
for k, v in kwargs.items():
assert getattr(manifest, k) == v
def is_cache_ahead_of_persistance(self):
return self.entry_id in self.local_storage.manifest_storage._cache_ahead_of_localdb
async def get_manifest(self):
return await self.local_storage.get_manifest(self.entry_id)
async def set_manifest(self, manifest):
async with self.local_storage.lock_manifest(self.entry_id):
await self.local_storage.set_manifest(self.entry_id, manifest)
def open(self):
return self.local_storage.create_file_descriptor(self.fresh_manifest)
@pytest.fixture
async def foo_txt(alice, alice_file_transactions):
local_storage = alice_file_transactions.local_storage
now = datetime(2000, 1, 2)
placeholder = LocalFileManifest.new_placeholder(alice.device_id, parent=EntryID(), now=now)
remote_v1 = placeholder.to_remote(author=alice.device_id, timestamp=now)
manifest = LocalFileManifest.from_remote(remote_v1)
async with local_storage.lock_entry_id(manifest.id):
await local_storage.set_manifest(manifest.id, manifest)
return File(local_storage, manifest)
@pytest.mark.trio
async def test_close_unknown_fd(alice_file_transactions):
with pytest.raises(FSInvalidFileDescriptor):
await alice_file_transactions.fd_close(42)
@pytest.mark.trio
async def test_operations_on_file(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
fd = foo_txt.open()
assert isinstance(fd, int)
with freeze_time("2000-01-03"):
await file_transactions.fd_write(fd, b"hello ", 0)
await file_transactions.fd_write(fd, b"world !", -1)
await file_transactions.fd_write(fd, b"H", 0)
await file_transactions.fd_write(fd, b"", 0)
assert foo_txt.is_cache_ahead_of_persistance()
fd2 = foo_txt.open()
await file_transactions.fd_write(fd2, b"!!!", -1)
data = await file_transactions.fd_read(fd2, 1, 0)
assert data == b"H"
await file_transactions.fd_close(fd2)
foo_txt.ensure_manifest(
size=16,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
data = await file_transactions.fd_read(fd, 5, 6)
assert data == b"world"
await file_transactions.fd_close(fd)
assert not foo_txt.is_cache_ahead_of_persistance()
fd2 = foo_txt.open()
data = await file_transactions.fd_read(fd2, -1, 0)
assert data == b"Hello world !!!!"
await file_transactions.fd_close(fd2)
assert not foo_txt.is_cache_ahead_of_persistance()
foo_txt.ensure_manifest(
size=16,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
@pytest.mark.trio
async def test_flush_file(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
fd = foo_txt.open()
foo_txt.ensure_manifest(
size=0,
is_placeholder=False,
need_sync=False,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 2),
)
with freeze_time("2000-01-03"):
await file_transactions.fd_write(fd, b"hello ", 0)
await file_transactions.fd_write(fd, b"world !", -1)
assert foo_txt.is_cache_ahead_of_persistance()
foo_txt.ensure_manifest(
size=13,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
await file_transactions.fd_flush(fd)
assert not foo_txt.is_cache_ahead_of_persistance()
await file_transactions.fd_close(fd)
assert not foo_txt.is_cache_ahead_of_persistance()
foo_txt.ensure_manifest(
size=13,
is_placeholder=False,
need_sync=True,
base_version=1,
created=datetime(2000, 1, 2),
updated=datetime(2000, 1, 3),
)
@pytest.mark.trio
async def test_block_not_loaded_entry(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
foo_manifest = await foo_txt.get_manifest()
chunk1_data = b"a" * 10
chunk2_data = b"b" * 5
chunk1 = Chunk.new(0, 10).evolve_as_block(chunk1_data)
chunk2 = Chunk.new(10, 15).evolve_as_block(chunk2_data)
foo_manifest = foo_manifest.evolve(blocks=((chunk1, chunk2),), size=15)
async with file_transactions.local_storage.lock_entry_id(foo_manifest.parent):
await foo_txt.set_manifest(foo_manifest)
fd = foo_txt.open()
with pytest.raises(FSRemoteBlockNotFound):
await file_transactions.fd_read(fd, 14, 0)
await file_transactions.local_storage.set_chunk(chunk1.id, chunk1_data)
await file_transactions.local_storage.set_chunk(chunk2.id, chunk2_data)
data = await file_transactions.fd_read(fd, 14, 0)
assert data == chunk1_data + chunk2_data[:4]
@pytest.mark.trio
async def test_load_block_from_remote(alice_file_transactions, foo_txt):
file_transactions = alice_file_transactions
# Prepare the backend
workspace_id = file_transactions.remote_loader.workspace_id
await file_transactions.remote_loader.create_realm(workspace_id)
foo_manifest = await foo_txt.get_manifest()
chunk1_data = b"a" * 10
chunk2_data = b"b" * 5
chunk1 = Chunk.new(0, 10).evolve_as_block(chunk1_data)
chunk2 = Chunk.new(10, 15).evolve_as_block(chunk2_data)
foo_manifest = foo_manifest.evolve(blocks=((chunk1, chunk2),), size=15)
await foo_txt.set_manifest(foo_manifest)
fd = foo_txt.open()
await file_transactions.remote_loader.upload_block(chunk1.access, chunk1_data)
await file_transactions.remote_loader.upload_block(chunk2.access, chunk2_data)
await file_transactions.local_storage.clear_clean_block(chunk1.access.id)
await file_transactions.local_storage.clear_clean_block(chunk2.access.id)
data = await file_transactions.fd_read(fd, 14, 0)
assert data == chunk1_data + chunk2_data[:4]
size = st.integers(min_value=0, max_value=4 * 1024 ** 2) # Between 0 and 4MB
@pytest.mark.slow
@pytest.mark.skipif(os.name == "nt", reason="Windows file style not compatible with oracle")
def test_file_operations(
tmpdir, hypothesis_settings, reset_testbed, file_transactions_factory, alice, alice_backend_cmds
):
tentative = 0
class FileOperationsStateMachine(TrioAsyncioRuleBasedStateMachine):
async def start_transactions(self):
async def _transactions_controlled_cb(started_cb):
async with WorkspaceStorage.run(alice, Path("/dummy"), EntryID()) as local_storage:
file_transactions = await file_transactions_factory(
self.device, alice_backend_cmds, local_storage=local_storage
)
await started_cb(file_transactions=file_transactions)
self.transactions_controller = await self.get_root_nursery().start(
call_with_control, _transactions_controlled_cb
)
@initialize()
async def init(self):
nonlocal tentative
tentative += 1
await reset_testbed()
self.device = alice
await self.start_transactions()
self.file_transactions = self.transactions_controller.file_transactions
self.local_storage = self.file_transactions.local_storage
self.fresh_manifest = LocalFileManifest.new_placeholder(
alice.device_id, parent=EntryID()
)
self.entry_id = self.fresh_manifest.id
async with self.local_storage.lock_entry_id(self.entry_id):
await self.local_storage.set_manifest(self.entry_id, self.fresh_manifest)
self.fd = self.local_storage.create_file_descriptor(self.fresh_manifest)
self.file_oracle_path = tmpdir / f"oracle-test-{tentative}.txt"
self.file_oracle_fd = os.open(self.file_oracle_path, os.O_RDWR | os.O_CREAT)
async def teardown(self):
if not hasattr(self, "fd"):
return
await self.file_transactions.fd_close(self.fd)
os.close(self.file_oracle_fd)
@rule(size=size, offset=size)
async def read(self, size, offset):
data = await self.file_transactions.fd_read(self.fd, size, offset)
os.lseek(self.file_oracle_fd, offset, os.SEEK_SET)
expected = os.read(self.file_oracle_fd, size)
assert data == expected
@rule(content=st.binary(), offset=size)
async def write(self, content, offset):
await self.file_transactions.fd_write(self.fd, content, offset)
os.lseek(self.file_oracle_fd, offset, os.SEEK_SET)
os.write(self.file_oracle_fd, content)
@rule(length=size)
async def resize(self, length):
await self.file_transactions.fd_resize(self.fd, length)
os.ftruncate(self.file_oracle_fd, length)
@rule()
async def reopen(self):
await self.file_transactions.fd_close(self.fd)
self.fd = self.local_storage.create_file_descriptor(self.fresh_manifest)
os.close(self.file_oracle_fd)
self.file_oracle_fd = os.open(self.file_oracle_path, os.O_RDWR)
run_state_machine_as_test(FileOperationsStateMachine, settings=hypothesis_settings)
|
bitlogik/guardata
|
tests/client/fs/workspacefs/test_file_transactions.py
|
test_file_transactions.py
|
py
| 10,393 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "pendulum.datetime",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.LocalFileManifest.new_placeholder",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.LocalFileManifest",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "guardata.client.types.EntryID",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.LocalFileManifest.from_remote",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.LocalFileManifest",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "guardata.client.fs.workspacefs.file_transactions.FSInvalidFileDescriptor",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "pytest.mark",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tests.common.freeze_time",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "pendulum.datetime",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tests.common.freeze_time",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pendulum.datetime",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "guardata.client.types.Chunk.new",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.Chunk",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "guardata.client.types.Chunk.new",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.Chunk",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "guardata.client.fs.exceptions.FSRemoteBlockNotFound",
"line_number": 176,
"usage_type": "argument"
},
{
"api_name": "pytest.mark",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "guardata.client.types.Chunk.new",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.Chunk",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "guardata.client.types.Chunk.new",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.Chunk",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "hypothesis.strategies.integers",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "hypothesis.strategies",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "hypothesis_trio.stateful.TrioAsyncioRuleBasedStateMachine",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "guardata.client.fs.storage.WorkspaceStorage.run",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "guardata.client.fs.storage.WorkspaceStorage",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.EntryID",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "tests.common.call_with_control",
"line_number": 232,
"usage_type": "argument"
},
{
"api_name": "guardata.client.types.LocalFileManifest.new_placeholder",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.LocalFileManifest",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "guardata.client.types.EntryID",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "os.open",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "os.O_RDWR",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "os.O_CREAT",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "hypothesis_trio.stateful.initialize",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "os.close",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.lseek",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "os.SEEK_SET",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "os.read",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "hypothesis_trio.stateful.rule",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "os.lseek",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "os.SEEK_SET",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "os.write",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "hypothesis_trio.stateful.rule",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "hypothesis.strategies.binary",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "hypothesis.strategies",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "os.ftruncate",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "hypothesis_trio.stateful.rule",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "os.close",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "os.open",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "os.O_RDWR",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "hypothesis_trio.stateful.rule",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "hypothesis_trio.stateful.run_state_machine_as_test",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.skipif",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "os.name",
"line_number": 216,
"usage_type": "attribute"
}
] |
74530690747
|
#!/usr/bin/env python3
"""antipatibot, discord server."""
import asyncio
import logging
import os
import secrets
from dataclasses import dataclass
import discord
from discord.ext import commands
import yt_dlp as youtube_dl
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': False,
    'extract_flat': 'in_playlist',  # don't extract stream urls / thumbnails for playlists
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
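# A single module-level YoutubeDL instance is shared by every guild; the
# blocking extraction work is pushed onto an executor thread in
# YTDLSource.from_url so the event loop is not stalled.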
class YTDLSource(discord.PCMVolumeTransformer):
"""Youtube source class, which allows the bot to play youtube videos"""
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
"""Returns an audio from a youtube link."""
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
# Take first item from a playlist.
# This should never happen, since we handle this in the play command,
# but better safe than sorry.
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
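# Hypothetical usage sketch, from inside an async command handler:
#   player = await YTDLSource.from_url(url, loop=bot.loop, stream=True)
#   ctx.voice_client.play(player)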
@dataclass()
class BotSettings:
"""Bot settings associated to each antipatibot instance."""
discord_token: str = os.getenv("ANTIPATIBOT_DISCORD_TOKEN", "")
command_prefix: str = os.getenv("ANTIPATIBOT_COMMAND_PREFIX", "!")
max_queue_size: int = int(os.getenv("ANTIPATIBOT_MAX_QUEUE_SIZE", "1000"))
idle_timeout: int = int(os.getenv("ANTIPATIBOT_IDLE_TIMEOUT", "300"))
@dataclass()
class GuildData:
"""Data associated to each guild: song queue, music task and lock."""
guild_id: int = None
lock: asyncio.Lock = asyncio.Lock()
queue: asyncio.Queue = None
task: asyncio.Task = None
loop: bool = False
def __init__(self, guild_id: int, max_queue_size: int, task: asyncio.Task = None):
self.guild_id = guild_id
self.task = task
self.queue = asyncio.Queue(max_queue_size)
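        # Note: `lock` keeps its class-level dataclass default, so all
        # GuildData instances share one asyncio.Lock; a strictly per-guild
        # lock would need to be created here, the way `queue` is.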
# pylint: disable=R0201,R0904
class AntipatiBot(commands.Cog):
"""AntipatiBot's collection of command."""
def __init__(self, bot, log, settings: BotSettings):
self.bot = bot
self.log = log
self.settings = settings
self.guild_data = {}
async def cog_command_error(self, ctx, error):
message = ctx.message.content
self.log.error(f"command_error:{ctx.guild.id}:{self.log.sanitize(ctx.author)}" +
f":{ctx.command}:{ctx.author.id}:{message}:{error}")
await ctx.message.reply("Invalid command.")
async def cog_before_invoke(self, ctx):
self.log.info(
f"command:{ctx.guild.id}:{self.log.sanitize(ctx.author)}:{ctx.author.id}:{ctx.command}")
@commands.Cog.listener()
async def on_ready(self):
"""Triggers when the bot is ready to run, used to log basic information."""
self.log.info("login:%s", self.bot.user)
for guild in self.bot.guilds:
self.log.info("joined_guild:%d:%s", guild.id, self.log.sanitize(guild.name))
self.guild_data[guild.id] = GuildData(guild.id, self.settings.max_queue_size)
async def ensure_guild_thread(self, guild_id: int):
"""Ensure the music_player_loop thread is running for the given guild_id"""
guild_data = self.guild_data[guild_id]
async with guild_data.lock:
if guild_data.task is None:
guild_data.task = asyncio.create_task(self.music_player_loop(guild_data))
async def terminate_guild_thread(self, guild_id: int):
"""Kill the music_player_loop thread for the given guild_id"""
guild_data = self.guild_data[guild_id]
async with guild_data.lock:
if guild_data.task is not None:
guild_data.task.cancel()
guild_data.queue = asyncio.Queue(self.settings.max_queue_size)
guild_data.loop = False
guild_data.task = None
@commands.Cog.listener()
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState,
after: discord.VoiceState):
"""Triggers when the bot joins or leaves a voice channel.
Starts the music_player_loop for the given guild."""
guild_id = member.guild.id
if self.bot.user == member:
"""Ensure the music_player_loop is alive/dead"""
if after.channel is not None:
await self.ensure_guild_thread(guild_id)
elif after.channel is None:
await self.terminate_guild_thread(guild_id)
else:
"""Terminate when you are left alone on a channel"""
voice = discord.utils.get(self.bot.voice_clients, guild__id=guild_id)
if voice is not None and len(voice.channel.members) < 2:
# FIXME: possible race condition on idle timeout
await voice.disconnect()
await self.terminate_guild_thread(guild_id)
async def music_player_loop(self, guild_data: GuildData):
"""Task which handles the queue list, cross-guild in theory (wip)."""
self.log.info(f"music_player_loop:{guild_data.guild_id}:start")
while True:
try:
(song_request, ctx) = \
await asyncio.wait_for(guild_data.queue.get(), self.settings.idle_timeout)
self.log.info("song request: " + str(song_request))
player = await YTDLSource.from_url(song_request, loop=self.bot.loop, stream=True)
playing_current_song = asyncio.Event()
def on_song_end(error):
if error is not None:
self.log.error("Player error: %s", error)
playing_current_song.set()
ctx.voice_client.play(player, after=on_song_end)
await ctx.send(f"Now playing: {player.title}")
await playing_current_song.wait()
if guild_data.loop:
try:
guild_data.queue.put_nowait((song_request, ctx))
except asyncio.QueueFull:
pass
except asyncio.CancelledError:
self.log.info(f"music_player_loop:{guild_data.guild_id}:cancelled")
return
except asyncio.TimeoutError:
self.log.info(f"music_player_loop:{guild_data.guild_id}:timeout")
voice = discord.utils.get(self.bot.voice_clients, guild__id=guild_data.guild_id)
if voice is not None:
await voice.disconnect()
return
except Exception as exception: # pylint: disable=W0703
self.log.warning(f"music_player_loop:{guild_data.guild_id}:uncaught exception: {exception}")
@commands.command()
async def join(self, ctx, *, channel: discord.VoiceChannel = None):
"""
Either join a given voice channel or move to the author voice channel.
If no channel is specified, connect to the user's current voice channel.
"""""
if channel is None:
if ctx.author.voice is None:
return await ctx.message.reply("You are not connected to a voice channel.")
channel = ctx.author.voice.channel
if ctx.voice_client is not None:
await ctx.voice_client.move_to(channel)
else:
await channel.connect()
await self.ensure_guild_thread(ctx.guild.id)
@commands.command(aliases=["cicca"])
async def cichero(self, ctx):
"""Great classic."""
return await self.play(ctx, song_request="https://www.youtube.com/watch?v=DAuPe14li4g")
@commands.command(aliases=["jhon"])
async def john(self, ctx):
"""He truly is."""
return await self.play(ctx, song_request="https://www.youtube.com/watch?v=dALcFSyFcXs")
@commands.command()
async def bastardo(self, ctx):
"""Mia mamma è una brava donna, napoletana ma brava"""
return await self.play(ctx, song_request="https://www.youtube.com/watch?v=12gmyUCqLxA")
@commands.command(aliases=["p", "youtube", "yt"])
async def play(self, ctx, *, song_request: str):
"""Add youtube song to playlist."""
async with ctx.typing():
guild_data = self.guild_data[ctx.guild.id]
songs = [ytdl.extract_info(song_request, download=False)]
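            # Note: this extract_info call runs synchronously on the event
            # loop; unlike YTDLSource.from_url it is not offloaded to an
            # executor, so long playlist lookups can block other commands.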
if "entries" in songs[0]:
# YouTube playlist
songs = list(song for song in songs[0]["entries"])
for song in songs:
try:
guild_data.queue.put_nowait((song["url"], ctx))
except asyncio.QueueFull:
await ctx.message.reply(
f"Song queue is full (Max size: {guild_data.queue.maxsize})")
return
await ctx.message.reply("Song added to the queue" if len(songs) == 1
else f"Added {len(songs)} songs to the queue.")
@commands.command(aliases=["clear", "clean", "hairottoilcazzo"])
async def stop(self, ctx, *, reply=True):
"""Clear the queue and stop playing music"""
guild_data = self.guild_data[ctx.guild.id]
try:
while True:
guild_data.queue.get_nowait()
except asyncio.QueueEmpty:
await self.skip(ctx)
if reply:
await ctx.message.reply("Song queue cleared and music stopped")
@commands.command(aliases=["kill", "terminate", "harakiri"])
async def disconnect(self, ctx):
"""Clear the queue, stop playing music and disconnect from the channel"""
await self.stop(ctx, reply=False)
if ctx.voice_client is not None:
await ctx.voice_client.disconnect()
@commands.command(aliases=["next"])
async def skip(self, ctx):
"""Skip the song that is currently playing."""
if ctx.voice_client is not None and ctx.voice_client.is_playing():
ctx.voice_client.stop()
@commands.command()
async def loop(self, ctx):
"""Toggle the loop functionality"""
async with ctx.typing():
guild_data = self.guild_data[ctx.guild.id]
guild_data.loop = not guild_data.loop
await ctx.message.reply(f"Loop {'activated' if guild_data.loop else 'deactivated'}")
@commands.command(aliases=["die", "roll"])
async def dice(self, ctx, num: int = 1, sides: int = 20, show_sides: bool = True):
"""Roll an n sided dice"""
if sides < 1 or sides > 0x1337 or num < 1 or num > 40:
return await ctx.message.reply("You have been added to a list.")
if num == 1:
return await ctx.message.reply((f"[d{sides}] " if show_sides else "") +
f"You rolled a {secrets.randbelow(sides) + 1}")
rolls = [secrets.randbelow(sides) + 1 for _ in range(num)]
return await ctx.message.reply(
f"[{num}d{sides}] You rolled {'+'.join([str(r) for r in rolls])} = {sum(rolls)}")
# pylint: disable=C0103
@commands.command()
async def d4(self, ctx, n=1):
"""Roll a 4-sided dice"""
await self.dice(ctx, sides=4, num=n, show_sides=False)
@commands.command()
async def d6(self, ctx, n=1):
"""Roll a 6-sided dice"""
await self.dice(ctx, sides=6, num=n, show_sides=False)
@commands.command()
async def d8(self, ctx, n=1):
"""Roll a 8-sided dice"""
await self.dice(ctx, sides=8, num=n, show_sides=False)
@commands.command()
async def d10(self, ctx, n=1):
"""Roll a 10-sided dice"""
await self.dice(ctx, sides=10, num=n, show_sides=False)
@commands.command()
async def d12(self, ctx, n=1):
"""Roll a 10-sided dice"""
await self.dice(ctx, sides=12, num=n, show_sides=False)
@commands.command()
async def d20(self, ctx, n=1):
"""Roll a 20-sided dice"""
await self.dice(ctx, sides=20, num=n, show_sides=False)
@commands.command()
async def d100(self, ctx, n=1):
"""Roll a 100-sided dice"""
await self.dice(ctx, sides=100, num=n, show_sides=False)
@play.before_invoke
@cichero.before_invoke
@john.before_invoke
@bastardo.before_invoke
async def ensure_voice(self, ctx):
"""Pre-hook used to ensure you the bot is connected to a voice channel before starting to
play music."""
return await self.join(ctx)
def main():
"""Entrypoint for antipatibot program"""
logging.basicConfig(level=logging.INFO)
logging.getLogger("discord").setLevel(logging.WARNING)
log = logging.getLogger("antipatibot")
# log.setLevel(logging.DEBUG)
settings = BotSettings()
bot = commands.Bot(command_prefix=commands.when_mentioned_or(settings.command_prefix),
description="AntipatiBot")
log.sanitize = lambda message: str(message).replace(":", "_") \
.replace("\r", "\\r") \
.replace("\n", "\\n") \
.replace("\t", "\\t")
bot.add_cog(AntipatiBot(bot, log, settings))
try:
discord_api_file = "/antipatibot/discord_token.txt"
if os.path.exists(discord_api_file) and os.path.isfile(discord_api_file):
with open(discord_api_file, encoding='utf8') as file:
settings.discord_token = file.read().strip("\n\r\t ")
bot.run(settings.discord_token)
except discord.errors.LoginFailure:
log.error("invalid_discord_token:Please set a valid discord bot API token.")
if __name__ == "__main__":
main()
|
antipatico/antipatibot
|
antipatibot.py
|
antipatibot.py
|
py
| 14,449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "yt_dlp.utils",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "yt_dlp.YoutubeDL",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "discord.PCMVolumeTransformer",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "discord.FFmpegPCMAudio",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "asyncio.Lock",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "asyncio.Queue",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "asyncio.Task",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "asyncio.Task",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "asyncio.Queue",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "asyncio.create_task",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "asyncio.Queue",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "discord.Member",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "discord.VoiceState",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "discord.VoiceState",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "discord.utils.get",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "asyncio.wait_for",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "asyncio.Event",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "asyncio.QueueFull",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "asyncio.CancelledError",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "asyncio.TimeoutError",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "discord.utils.get",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "discord.VoiceChannel",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "asyncio.QueueFull",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "asyncio.QueueEmpty",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "secrets.randbelow",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "secrets.randbelow",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.when_mentioned_or",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "discord.errors",
"line_number": 351,
"usage_type": "attribute"
}
] |
69877253629
|
import torch
from torch import nn
import yaml
import cv2
import numpy as np
from vidgear.gears import CamGear
from matplotlib import pyplot as plt
from IPython.display import Image, clear_output
import argparse
import os
import datetime
import sys
from PIL import ImageFont, ImageDraw, Image
import time
from pathlib import Path
from utils.plots import *
from utils.torch_utils import *
from utils.general import *
from utils.datasets import letterbox
import gdown
from deep_sort.utils.parser import get_config
from deep_sort.deep_sort import DeepSort
import pymysql
# Path setup
path = "C:/Users/YongJun/Desktop/YOLO/1228_TNS/images"
model = torch.load('C:/Users/YongJun/Desktop/YOLO/YOLOv5s_1229.pt')
image_paths = sorted([os.path.join(path, f) for f in os.listdir(path) if f.endswith(".jpg") or f.endswith(".png")])
label_paths = sorted([os.path.join(path, f) for f in os.listdir(path) if f.endswith(".txt")])
# Database connection
conn = pymysql.connect(
host='localhost',
user='root',
password='013579',
db='tns_db',
charset='utf8'
)
# Data to be stored in the DB - record the start time
start_time = time.time()
# Labeled classes
class_dict = {
"OK": 0,
"NG_Blur": 1,
"NG_Scratch": 2,
}
ok_idx = class_dict['OK']
ng_blur_idx = class_dict['NG_Blur']
ng_scratch_idx = class_dict['NG_Scratch']
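# Rows of results.xyxy[0] are (x1, y1, x2, y2, confidence, class), so
# detection[5] in the loops below is compared against these class indices.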
labels = []
# Webcam setup
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(1)
# Detections from webcam 1
ok_count1 = 0
ng_blur_count1 = 0
ng_scratch_count1 = 0
# Detections from webcam 2
ok_count2 = 0
ng_blur_count2 = 0
ng_scratch_count2 = 0
# Final counts
ok_count = 0
ng_count = 0
# Added section: cross-camera agreement flags
ok_detected1 = False
ok_detected2 = False
ng_detected1 = False
ng_detected2 = False
# Run YOLO
while True:
    # Record the current time
current_time = time.time()
    # Compute the time elapsed since the previous frame
elapsed_time = current_time - start_time
    # Update the previous-frame timestamp
start_time = current_time
ret1, frame1 = cap1.read()
ret2, frame2 = cap2.read()
labels1 = []
labels2 = []
results1 = model(frame1)
results2 = model(frame2)
detections1 = results1.xyxy[0]
detections2 = results2.xyxy[0]
    # Loop over each detected object and check whether it crossed the line
for detection in detections1:
        # Compute the object's center coordinates
center_x = (detection[0] + detection[2]) / 2
center_y = (detection[1] + detection[3]) / 2
        # Check whether the object crossed the line - line band set to x in (317, 323)
if center_x > 317 and center_x < 323:
label = detection[5]
labels1.append(label)
if label == ok_idx:
ok_count1 += 1
elif label == ng_blur_idx:
ng_blur_count1 += 1
elif label == ng_scratch_idx:
ng_scratch_count1 += 1
    # Loop over each detected object and check whether it crossed the line
for detection in detections2:
        # Compute the object's center coordinates
center_x = (detection[0] + detection[2]) / 2
center_y = (detection[1] + detection[3]) / 2
        # Check whether the object crossed the line - line band set to x in (317, 323)
if center_x > 317 and center_x < 323:
label = detection[5]
labels2.append(label)
if label == ok_idx:
ok_count2 += 1
elif label == ng_blur_idx:
ng_blur_count2 += 1
elif label == ng_scratch_idx:
ng_scratch_count2 += 1
    # Added section: combine per-camera results
if ok_idx in labels1 and ok_idx in labels2:
ok_detected1 = True
ok_detected2 = True
if ok_detected1 and ok_detected2:
ok_count += 1
ok_detected1 = False
ok_detected2 = False
if ng_blur_idx in labels1 or ng_scratch_idx in labels1:
ng_detected1 = True
if ng_blur_idx in labels2 or ng_scratch_idx in labels2:
ng_detected2 = True
if ng_detected1 or ng_detected2:
ng_count += 1
ng_detected1 = False
ng_detected2 = False
    # DB integration
cursor = conn.cursor()
count = 0
for detection in detections1:
count += 1
name = f"name{count}"
        # Save to the MariaDB table via SQL - add product, count, status, confidence, etc.
# (INSERT INTO tns (table name) id (table column ... ) VALUES (%s , %s , ...), (python_msg, python_msg))
        cursor.execute("INSERT INTO tns (id) VALUES (%s)",
                       (ok_count1,))
conn.commit()
    # Overlay text drawn on the video frames with cv2
    cv2.putText(frame1, f'OK: {ok_count1}',
                (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame1, f'NG_Blur: {ng_blur_count1}',
                (30, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame1, f'NG_Scratch: {ng_scratch_count1}',
                (30, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame2, f'OK: {ok_count2}',
(30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame2, f'NG_Blur: {ng_blur_count2}',
(30, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame2, f'NG_Scratch: {ng_scratch_count2}',
(30, 80), cv2.FONT_HERSHEY_SIMPLEX, 1,(0, 0, 255), 2)
cv2.line(frame1, (320, 0), (320, 640), (255, 0, 0), 2)
cv2.line(frame2, (320, 0), (320, 640), (255, 0, 0), 2)
cv2.imshow('TNS_CAP1', np.squeeze(results1.render()))
cv2.imshow('TNS_CAP2', np.squeeze(results2.render()))
    # Press 'q' to exit
if cv2.waitKey(1) == ord("q"):
break
# Release resources on exit
cap1.release()
cap2.release()
cv2.destroyAllWindows()
|
yeonsoo98/yolov5_object_count
|
detect.py
|
detect.py
|
py
| 5,852 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 209,
"usage_type": "call"
}
] |
42557211122
|
import numpy
from PIL import Image
def histogram(img):
image_gray = img.convert('L')
killy = numpy.array(image_gray)
maximum = numpy.max(killy)
minimum = numpy.min(killy)
dim = maximum - minimum + 1
hist, bins = numpy.histogram(killy, bins=dim)
return hist
image1 = Image.open("image1.jpg")
hist1 = histogram(image1)
image2 = Image.open("image2.jpg")
hist2 = histogram(image2)
image3 = Image.open("image3.jpg")
hist3 = histogram(image3)
image4 = Image.open("image4.jpg")
hist4 = histogram(image4)
image5 = Image.open("image5.jpg")
hist5 = histogram(image5)
image6 = Image.open("image6.jpg")
hist6 = histogram(image6)
image7 = Image.open("image7.jpg")
hist7 = histogram(image7)
image8 = Image.open("image8.jpg")
hist8 = histogram(image8)
image9 = Image.open("image9.jpg")
hist9 = histogram(image9)
image10 = Image.open("image10.jpg")
hist10 = histogram(image10)
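# calcul_distance below is a histogram-intersection distance:
#   d(h1, h2) = 1 - sum_i min(h1[i], h2[i]) / sum_i h1[i]
# (extra tail bins of the longer histogram are counted into the overlap).
# Normalization uses only h1's total, so the distance is not symmetric and
# the argument order (query histogram first) matters.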
def calcul_distance(h1, h2):
size1 = len(h1)
size2 = len(h2)
somme = 0
somme2 = 0
if size2 > size1:
for i in range(size2):
if i < size1:
somme = somme + (min(h1[i], h2[i]))
else:
somme = somme + h2[i]
else:
for i in range(size1):
if i < size2:
somme = somme + (min(h1[i], h2[i]))
else:
somme = somme + h1[i]
for i in range(size1):
somme2 = somme2 + h1[i]
distance = 1 - somme / somme2
return distance
distance1 = calcul_distance(hist1, hist2)
distance2 = calcul_distance(hist1, hist3)
distance3 = calcul_distance(hist1, hist4)
distance4 = calcul_distance(hist1, hist5)
distance5 = calcul_distance(hist1, hist6)
distance6 = calcul_distance(hist1, hist7)
distance7 = calcul_distance(hist1, hist8)
distance8 = calcul_distance(hist1, hist9)
distance9 = calcul_distance(hist1, hist10)
dictionary = {"image2": distance1, "image3": distance2, "image4": distance3, "image5": distance4,
"image6": distance5, "image7": distance6, "image8": distance7, "image9": distance8,
"image10": distance9}
print("Order of similarity used the request image <<image1>> : ")
for w in sorted(dictionary, key=dictionary.get):
print(w, dictionary[w])
|
jouhaina-nasri/Project-Indexation
|
TP Indexation/Histogramme/app.py
|
app.py
|
py
| 2,290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 42,
"usage_type": "name"
}
] |
28427138730
|
from smtplib import SMTP
from email.header import Header
from email.mime.text import MIMEText
def main():
    # Change the sender and receivers below as needed
sender = '[email protected]'
receivers = ['[email protected]', '[email protected]']
    message = MIMEText('Sample code for sending email with Python.', 'plain', 'utf-8')
message['From'] = Header('大锤', 'utf-8')
message['To'] = Header('发放', 'utf-8')
    message['Subject'] = Header('Sample code test email', 'utf-8')
smtper = SMTP('smtp.zhongfu.net')
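    # Note: SMTP() connects on port 25 without TLS; depending on the mail
    # provider you may need SMTP_SSL(host, 465) or a starttls() call instead.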
print("qqq")
    # Change the login password below as needed
    smtper.login(sender, 'password')
smtper.sendmail(sender, receivers, message.as_string())
    print('Email sent!')
if __name__ == '__main__':
main()
|
sunhuimoon/Python100Days
|
day14/day1403.py
|
day1403.py
|
py
| 772 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "email.mime.text.MIMEText",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "email.header.Header",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "email.header.Header",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "email.header.Header",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP",
"line_number": 14,
"usage_type": "call"
}
] |
38907343805
|
from sqlalchemy.exc import IntegrityError
from sqlalchemy import select
from sqlalchemy.orm import selectinload
from app.core.repo.base import BaseSqlalchemyRepo
from app.core.exceptions.repo import RepoException
from .models import Review
class ReviewRepo(BaseSqlalchemyRepo):
model = Review
async def create(self, db_session, obj_in):
"""
Create review
"""
try:
return await super().create(db_session, obj_in)
except IntegrityError as e:
raise RepoException("Review name must be unique", e)
async def get_review_by_room_id(self, db_session, room_id):
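        # selectinload() eagerly loads each review's related user in one
        # extra SELECT, avoiding N+1 lazy loads when the results are
        # serialized outside this session.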
stmt = select(self.model).options(selectinload(self.model.user)).where(self.model.room_id == room_id)
result = await db_session.execute(stmt)
return result.scalars().all()
async def get(self, db_session, id):
"""
Get review by id
"""
obj = await super().get(db_session, id)
if not obj:
raise RepoException("Review not found", None, status=404)
return obj
async def get_all(self, db_session):
"""
Get all reviews
"""
return await super().list(db_session)
async def update(self, db_session, id, obj_in):
"""
Update review by id
"""
db_obj = await self.get(db_session, id)
if not db_obj:
raise RepoException("Review not found", None, status=404)
try:
return await super().update(db_session, db_obj, obj_in)
except IntegrityError as e:
raise RepoException("Review title must be unique", e)
async def delete(self, db_session, id, user_id):
"""
Delete review by id
"""
await self.get(db_session, id=id)
return await super().delete(db_session, id=id)
|
rasimatics/excursio-backend
|
app/apps/review/repo.py
|
repo.py
|
py
| 1,901 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "app.core.repo.base.BaseSqlalchemyRepo",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.Review",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.exc.IntegrityError",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "app.core.exceptions.repo.RepoException",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.select",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.selectinload",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "app.core.exceptions.repo.RepoException",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "app.core.exceptions.repo.RepoException",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.exc.IntegrityError",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "app.core.exceptions.repo.RepoException",
"line_number": 54,
"usage_type": "call"
}
] |
71316404667
|
import requests
from bs4 import BeautifulSoup
import json
def get_pinned(github_user):
URL = f"https://github.com/{github_user}"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
pinned_data = soup.find_all("div", {"class": "pinned-item-list-item-content"})
pinned_posts = []
for post in pinned_data:
pinned_posts.append(post.find("a")["href"])
return pinned_posts
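# Hypothetical usage sketch:
#   pinned = get_pinned("octocat")            # e.g. ["/octocat/Hello-World"]
#   repos = get_projects("octocat", "python")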
def get_projects(github_user, query):
URL = f"https://github.com/{github_user}?tab=repositories&q={query}&type=source"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
projects = soup.body.find("ul", {"data-filterable-for": "your-repos-filter"})
if not projects:
return []
projects = projects.find_all("li")
projects_parsed = []
for project in projects:
project_data = {}
title = project.find("h3").a
project_data["name"] = title.text.strip().replace("-", " ").capitalize()
project_data["link"] = title["href"]
project_data["tags"] = [query]
impact = project.find("div", class_="f6 color-text-secondary mt-2")
if impact:
impact = impact.find_all("a")
for data in impact:
project_data[data["href"].split("/")[-1]] = int(data.text.strip())
if "stargazers" not in project_data:
project_data["stargazers"] = 0
if "members" not in project_data:
project_data["members"] = 0
project_data["score"] = project_data["stargazers"] + project_data["members"] * 5
else:
project_data["score"] = 0
projects_parsed.append(project_data)
return projects_parsed
def get_youtube_data(youtube_username):
initial_data = "var ytInitialData = "
final_data = ";"
url = f"https://www.youtube.com/{youtube_username}/videos"
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
scripts = soup.body.find_all("script")
videos_data = []
for script in scripts:
data = script.encode_contents().decode(errors="replace")
if initial_data not in data:
continue
data = data.replace(initial_data, "").replace(final_data, "")
tab_renderers = json.loads(data)["contents"]
tab_renderers = tab_renderers["twoColumnBrowseResultsRenderer"]["tabs"]
for tab in tab_renderers:
if "tabRenderer" not in tab:
continue
if tab["tabRenderer"]["title"] != "Videos":
continue
videos = tab["tabRenderer"]["content"]["sectionListRenderer"]
videos = videos["contents"][0]["itemSectionRenderer"]
videos = videos["contents"][0]["gridRenderer"]["items"]
for video in videos:
if "gridVideoRenderer" not in video:
continue
video = video["gridVideoRenderer"]
published = ""
if "publishedTimeText" in video:
published = video["publishedTimeText"]["simpleText"]
view_count_text = ""
if "simpleText" in video["viewCountText"]:
view_count_text = video["viewCountText"]["simpleText"]
video_data = {
"thumbnail": video["thumbnail"]["thumbnails"][-1]["url"],
"title": video["title"]["runs"][0]["text"],
"published": published,
"viewCountText": view_count_text,
"url": f"https://www.youtube.com/watch?v={video['videoId']}",
}
videos_data.append(video_data)
return videos_data
|
HectorPulido/HectorPulido
|
ReadmeGenerator/scraper.py
|
scraper.py
|
py
| 3,744 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 76,
"usage_type": "call"
}
] |
18100363274
|
"""
1143. Longest Common Subsequence
https://leetcode.com/problems/longest-common-subsequence/
"""
from typing import Dict, List, Tuple
from unittest import TestCase, main
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
"""
This is a classic DP problem
text1 = "abcde", text2 = "ace"
1. Since the 1st chars are the same (= "a"), we can break it down to a subproblem.
-> text1 = "bcde", text2 = "ce" + 1
2. The next 2 chars aren't the same ("b", "c"), let's divide it into 2 subproblem.
-> text1 = "cde", text2 = "ce" + 1, or
-> text1 = "bcde", text2 = "c" + 1
2-1. The next 2 chars are the same ("c")
-> text1 = "de", text2 = "e" + 1 + 1
3. The next 2 chars are not ("d", "e"), so let's divide it in to 2 subproblem.
-> text1 = "de", text2 = "" + 1 + 1, or
-> text1 = "e", text2 = "e" + 1 + 1
3-2. The next 2 chars are the same ("e")
-> 1 + 1 + 1
4. With this approach we can start from the last to fist indexes, using DP.
"""
m, n = len(text1), len(text2)
dp = [[0] * (n + 1) for _ in range(m + 1)] # Need extra column and row
for i in range(m - 1, -1, -1): # start from the 2nd last character
for j in range(n - 1, -1, -1): # start from the 2nd last character
if text1[i] == text2[j]:
dp[i][j] = dp[i + 1][j + 1] + 1
else:
dp[i][j] = max(dp[i][j + 1], dp[i + 1][j])
return dp[0][0]
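        # Worked example for text1="abcde", text2="ace" (rows follow text1,
        # columns follow text2, plus a sentinel row/column of zeros):
        #      a  c  e  .
        #   a [3, 2, 1, 0]
        #   b [2, 2, 1, 0]
        #   c [2, 2, 1, 0]
        #   d [1, 1, 1, 0]
        #   e [1, 1, 1, 0]
        #   . [0, 0, 0, 0]
        # dp[0][0] == 3, matching the subsequence "ace".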
class Test(TestCase):
data_set: List[Tuple[str, str, int]] = [
("abcde", "ace", 3),
("abc", "abc", 3),
("abc", "def", 0),
]
def test_solution(self):
for a, b, expected in self.data_set:
s = Solution()
self.assertEqual(s.longestCommonSubsequence(a, b), expected)
if __name__ == "__main__":
main()
|
hirotake111/leetcode_diary
|
leetcode/1143/solution.py
|
solution.py
|
py
| 1,955 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 55,
"usage_type": "call"
}
] |
25218471687
|
from django.db import models
from article.models import Article
from users.models import User
from ckeditor.fields import RichTextField
from mptt.models import MPTTModel, TreeForeignKey
# Create your models here.
class Comment(MPTTModel):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
body = RichTextField()
created = models.DateTimeField(auto_now_add=True)
    # MPTT tree structure
parent = TreeForeignKey(
'self',
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='children',
)
    # Records the user being replied to in a second-level comment
reply_to = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name='replyers',
)
class MPTTMeta:
order_insertion_by = ['created']
def __str__(self):
return self.body[:20]
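# A second-level reply might be created like this (hypothetical view code):
#   Comment.objects.create(article=article, user=request.user, body=body,
#                          parent=root_comment, reply_to=replied_user)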
|
MenGG6/personal-blog
|
comment/models.py
|
models.py
|
py
| 824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mptt.models.MPTTModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "article.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "article.models.Article",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "ckeditor.fields.RichTextField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "mptt.models.TreeForeignKey",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
}
] |
34045997209
|
from faster_rcnn.config import cfg, get_output_dir
import argparse
from utils_py3.timer import Timer
import numpy as np
import cv2
from utils_py3.cython_nms import nms
# from utils_py3.boxes_grid import get_boxes_grid
import pickle
# import heapq
from utils_py3.blob_helper import im_list_to_blob
import os
import math
import tensorflow as tf
from faster_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import matplotlib.pyplot as plt
from tensorflow.python.client import timeline
import time
import pdb
def _get_image_blob(im):
"""
Convert an image into a network input.
    Arguments:
        im (ndarray): a color image in BGR order
    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
print("im_shape is:",im_shape)
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
print("min and max:",im_size_min,im_size_max)
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
# def _project_im_rois(im_rois, scales):
# """Project image RoIs into the image pyramid built by _get_image_blob.
# Arguments:
# im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
# scales (list): scale factors as returned by _get_image_blob
# Returns:
# rois (ndarray): R x 4 matrix of projected RoI coordinates
# levels (list): image pyramid levels used by each projected RoI
# """
# im_rois = im_rois.astype(np.float, copy=False)
# scales = np.array(scales)
#
# if len(scales) > 1:
# widths = im_rois[:, 2] - im_rois[:, 0] + 1
# heights = im_rois[:, 3] - im_rois[:, 1] + 1
#
# areas = widths * heights
# scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
# diff_areas = np.abs(scaled_areas - 224 * 224)
# levels = diff_areas.argmin(axis=1)[:, np.newaxis]
# else:
# levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
#
# rois = im_rois * scales[levels]
#
# return rois, levels
def _get_blobs(im):
"""
Convert an image and RoIs within that image into inputs
"""
blobs = {'data': None, 'rois': None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries
"""
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""
Rescale boxes according to image rescaling
"""
for i in range(boxes.shape[0]):
boxes[i, :] = boxes[i, :] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im, boxes=None):
"""
Detect object classes in an image given object proposals
Arguments:
net: faster rcnn network to use
im: color image to test(in BGR order)
boxes(ndarray): R X 4 array of object proposals
Returns:
scores(ndarray): R X K array of object class scores(K includes
background as object category 0)
boxes(ndarray): R X (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im)
im_blob = blobs['data']
blobs['im_info'] = np.array([[im_blob.shape[1],
im_blob.shape[2],
im_scales[0]]], dtype=np.float32)
print(blobs['im_info'])
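    # im_info carries (blob height, blob width, scale factor) so the network
    # can map proposals back to original image coordinates.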
feed_dict = {net.data: blobs['data'], net.im_info: blobs['im_info'],
net.keep_prob: 1.0}
run_options = None
run_metadata = None
if cfg.TEST.DEBUG_TIMELINE:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# pdb.set_trace()
cls_score, cls_prob, bbox_pred, rois = sess.run(
[net.get_output('cls_score'), net.get_output('cls_prob'), net.get_output('bbox_pred'),
net.get_output('rois')], feed_dict = feed_dict, options=run_options,run_metadata=run_metadata
)
assert len(im_scales) == 1, "Only single-image batch implemented"
boxes = rois[:, 1:5] / im_scales[0]
scores = cls_prob
if cfg.TEST.BBOX_REG:
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
if cfg.TEST.DEBUG_TIMELINE:
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        trace_file = open(str(int(time.time() * 1000)) + '-test-timeline.ctf.json', 'w')
trace_file.write(trace.generate_chrome_trace_format(show_memory=False))
trace_file.close()
return scores,pred_boxes
def vis_detections(im, class_name, dets, thresh=0.8):
"""
Visual debugging of detections
"""
import matplotlib.pyplot as plt
for i in range(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
plt.title('{} {:.3f}'.format(class_name, score))
# def apply_nms(all_boxes, thresh):
def test_net(sess, net, imdb, weights_filename, max_per_image=300,
thresh=0.05, vis=False):
"""
Test a faster rcnn network on an image database
"""
num_images = len(imdb.image_index)
# pdb.set_trace()
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, weights_filename)
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in range(2):
box_proposals = None
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(sess, net, im, box_proposals)
_t['im_detect'].toc()
# pdb.set_trace()
_t['misc'].tic()
if vis:
image = im[:, :, (2, 1, 0)]
plt.cla()
plt.imshow(image)
# skip j = 0, because it's the background class
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
print(cls_dets)
if vis:
vis_detections(image, imdb.classes[j], cls_dets)
all_boxes[j][i] = cls_dets
if vis:
plt.show()
# Limit to max_per_image detections * over all classes *
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
# print ('Evaluating detections')
# imdb.evaluate_detections(all_boxes, output_dir)
|
hx121071/faster-rcnn-tf-py3
|
lib/faster_rcnn/test.py
|
test.py
|
py
| 8,840 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.float32",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg.PIXEL_MEANS",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.min",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "numpy.round",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "cv2.resize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_LINEAR",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "utils_py3.blob_helper.im_list_to_blob",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "tensorflow.RunOptions",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tensorflow.RunMetadata",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "faster_rcnn.bbox_transform.bbox_transform_inv",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "tensorflow.python.client.timeline.Timeline",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.client.timeline",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Rectangle",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "faster_rcnn.config.get_output_dir",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "utils_py3.timer.Timer",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cla",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "utils_py3.cython_nms.nms",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "faster_rcnn.config.cfg.TEST",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "faster_rcnn.config.cfg",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "numpy.hstack",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 262,
"usage_type": "attribute"
}
] |