seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
24347960900
|
import numpy as np
import os

from .image_extraction import extract_images_from_pdf
from .images import get_box
from PIL import Image
from flask import current_app


def reference_image(exam_id, page, dpi, widget_area_in=None, padding=0):
    """Returns a reference image for a specified area.

    The reference image is a flattened image of the
    problem on the original PDF.

    Parameters
    ----------
    exam_id : int
        The id of the exam to use
    page : int
        The page number to get the reference image for
    dpi : int
        The desired DPI of the image
    widget_area_in : numpy array
        The widget coordinates as numpy array.
        If None, return the full page.
    padding : float
        Extra padding to apply in inches

    Returns
    -------
    image : numpy array
        Image data of the requested area.
    """
    app_config = current_app.config
    data_directory = app_config["DATA_DIRECTORY"]

    generated_path = os.path.join(data_directory, f"{exam_id}_data", "blanks", f"{dpi}")
    if not os.path.exists(generated_path):
        _extract_reference_images(dpi, exam_id)

    image_path = os.path.join(generated_path, f"page{page:02d}.jpg")
    if not os.path.exists(image_path):
        _extract_reference_images(dpi, exam_id)

    blank_page = Image.open(image_path)
    blank_img_array = np.array(blank_page)

    if widget_area_in is not None:
        return get_box(blank_img_array, widget_area_in, padding=padding)
    else:
        return blank_img_array


def _extract_reference_images(dpi, exam_id):
    """Extract and save reference images for the specified exam.

    Saves the images at:
    {data_directory}/{exam_id}_data/blanks/{dpi}/page{page}.jpg

    Parameters
    ----------
    dpi : int
        The desired DPI for the extracted images
    exam_id : int
        The id of the desired exam
    """
    data_directory = current_app.config["DATA_DIRECTORY"]
    output_directory = os.path.join(data_directory, f"{exam_id}_data")
    pdf_path = os.path.join(output_directory, "exam.pdf")

    pages = extract_images_from_pdf(pdf_path, dpi=dpi)
    for page, (image, _) in enumerate(pages, start=1):
        _save_image(image, page, dpi, output_directory)


def _save_image(image, page, dpi, output_directory):
    """Save an image at an appropriate location.

    Saves the images at:
    {output_directory}/blanks/{dpi}/page{page}.jpg
    (zero-indexed on disk, so page 1 is saved as page00.jpg)

    Parameters
    ----------
    image : PIL Image
        Image data.
    page : int
        The corresponding page number, starting at 1.
    dpi : int
        The DPI of the image to save.
    output_directory : path
        The output directory of the exam the page is from.

    Returns
    -------
    image_path : string
        Location of the image.
    """
    submission_path = os.path.join(output_directory, "blanks", f"{dpi}")
    os.makedirs(submission_path, exist_ok=True)
    image_path = os.path.join(submission_path, f"page{page-1:02d}.jpg")
    image.save(image_path)
    return image_path
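
# Hypothetical usage sketch (assumes a Flask app object with "DATA_DIRECTORY"
# configured; the exam id, page and DPI values are illustrative):
#
#     with app.app_context():
#         full_page = reference_image(exam_id=1, page=0, dpi=150)
#         cropped = reference_image(1, 0, 150, widget_area_in=widget_area, padding=0.3)
#
# where widget_area is a numpy array of coordinates in the format expected by get_box.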
|
zesje/zesje
|
zesje/blanks.py
|
blanks.py
|
py
| 3,019 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "flask.current_app.config",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "images.get_box",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "image_extraction.extract_images_from_pdf",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
}
] |
10702615749
|
# pylint: disable=E0401
import js
import functools
from pyodide.ffi import create_once_callable, create_proxy
from enum import Enum
from typing import Callable, TypedDict, Sequence, MutableMapping, Union

State = MutableMapping[str, Union[str, int, dict, list, None]]
Actions = dict[str, Callable]
Attributes = dict[str, Union[str, int, tuple[str]]]


class VDom(TypedDict):
    node_name: str
    attributes: Attributes
    children: Sequence[Union[str, "VDom"]]


def p(
    node_name: str, attributes: Attributes, children: Sequence[Union[str, "VDom"]]
) -> VDom:
    if not isinstance(children, Sequence):
        return {
            "node_name": node_name,
            "attributes": attributes,
            "children": [children],
        }
    return {"node_name": node_name, "attributes": attributes, "children": children}


class App:
    def __init__(
        self,
        selector: str,
        state: State,
        view: Callable[[State, Actions], VDom],
        actions: Actions,
    ):
        def dispatch_action(action, state, data):
            action(state, data)
            self.resolve_node()

        self.view = view
        self.state = state
        self.actions = {
            name: functools.partial(dispatch_action, action)
            for name, action in actions.items()
        }
        self.skip_render = False
        self.new_node = None
        self.current_node = None
        self.dom_manager = DomManager(selector)
        self.resolve_node()

    def resolve_node(self):
        self.new_node = self.view(self.state, self.actions)
        self.schedule_render()

    def render(self, _):
        self.dom_manager.render(self.new_node)
        self.skip_render = False

    def schedule_render(self):
        if not self.skip_render:
            self.skip_render = True
            js.requestAnimationFrame(create_once_callable(self.render))


class DomManager:
    def __init__(self, selector: str) -> None:
        self.element = js.document.querySelector(selector)
        self.element.innerHTML = ""
        self.v_current_node = None

    class ChangeType(Enum):
        NONE = 1
        TYPE = 2
        TEXT = 3
        NODE = 4
        VALUE = 5
        ATTR = 6

    def render(self, v_new_node):
        if self.v_current_node:
            self.update_element(self.element, self.v_current_node, v_new_node)
        else:
            self.element.appendChild(self.create_element(v_new_node))
        self.v_current_node = v_new_node

    def create_element(self, v_node):
        if not self.is_v_node(v_node):
            return js.document.createTextNode(str(v_node))
        element = js.document.createElement(v_node["node_name"])
        self.set_attributes(element, v_node["attributes"])
        for child in v_node["children"]:
            element.appendChild(self.create_element(child))
        return element

    def set_attributes(self, element, attributes):
        for attr, value in attributes.items():
            if self.is_event_attr(attr):
                element.addEventListener(attr[2:].lower(), create_proxy(value))
            else:
                element.setAttribute(str(attr), value)

    def update_element(
        self, parent_node, v_current_node, v_new_node, current_node_index=0
    ):
        if not v_current_node:
            parent_node.appendChild(self.create_element(v_new_node))
            return
        current_node = (
            parent_node.childNodes[current_node_index]
            if len(parent_node.childNodes) > current_node_index
            else parent_node.childNodes[-1]
        )
        if not v_new_node:
            parent_node.removeChild(current_node)
            return
        change_type = self.change_type(v_current_node, v_new_node)
        if change_type in [
            self.ChangeType.TYPE,
            self.ChangeType.TEXT,
            self.ChangeType.NODE,
        ]:
            parent_node.replaceChild(self.create_element(v_new_node), current_node)
        if change_type == self.ChangeType.VALUE:
            current_node.value = v_new_node["attributes"].get("value")
        if change_type == self.ChangeType.ATTR:
            self.update_attributes(
                current_node, v_current_node["attributes"], v_new_node["attributes"]
            )
        if not self.is_v_node(v_current_node) or not self.is_v_node(v_new_node):
            return
        for i in range(
            max([len(v_current_node["children"]), len(v_new_node["children"])])
        ):
            v_current_node_child = (
                v_current_node["children"][i]
                if i < len(v_current_node["children"])
                else None
            )
            v_new_node_child = (
                v_new_node["children"][i] if i < len(v_new_node["children"]) else None
            )
            self.update_element(current_node, v_current_node_child, v_new_node_child, i)

    def update_attributes(self, target_node, current_attributes, new_attributes):
        for attr in list(set(current_attributes.keys()) - set(new_attributes)):
            if self.is_event_attr(str(attr)):
                continue
            target_node.removeAttribute(str(attr))
        for attr, value in new_attributes.items():
            if (
                self.is_event_attr(str(attr))
                or current_attributes.get(str(attr)) == value
            ):
                continue
            target_node.setAttribute(str(attr), value)

    def change_type(self, a, b):
        if a.__class__.__name__ != b.__class__.__name__:
            return self.ChangeType.TYPE
        if not self.is_v_node(a) and a != b:
            return self.ChangeType.TEXT
        if self.is_v_node(a) and self.is_v_node(b):
            if a["node_name"] != b["node_name"]:
                return self.ChangeType.NODE
            if a["attributes"].get("value") != b["attributes"].get("value"):
                return self.ChangeType.VALUE
            if a["attributes"] != b["attributes"]:
                return self.ChangeType.ATTR
        return self.ChangeType.NONE

    def is_v_node(self, node):
        return isinstance(node, dict)

    def is_event_attr(self, attr: str):
        return attr.startswith("on")
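
# Hypothetical usage sketch (names below are illustrative, not part of the
# module): a minimal counter app wiring state, a view and actions together.
# dispatch_action passes (state, data) to the action and re-renders afterwards.
#
#     def increment(state, _event):
#         state["count"] += 1
#
#     def view(state, actions):
#         return p("div", {}, [
#             str(state["count"]),
#             p("button", {"onClick": lambda e: actions["increment"](state, e)}, ["+"]),
#         ])
#
#     App("#app", {"count": 0}, view, {"increment": increment})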
|
harehare/python-wasm-vdom
|
vdom.py
|
vdom.py
|
py
| 6,230 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.MutableMapping",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.TypedDict",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "typing.Callable",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "js.requestAnimationFrame",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pyodide.ffi.create_once_callable",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "js.document.querySelector",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "js.document",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "enum.Enum",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "js.document.createTextNode",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "js.document",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "js.document.createElement",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "js.document",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pyodide.ffi.create_proxy",
"line_number": 110,
"usage_type": "call"
}
] |
2785515345
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import logging as log
from time import sleep
from html import escape
import requests


class FloodException(Exception): pass


def send(config, post):
    # prepare config
    c = {'token': '', 'chat': '', 'maxlength': 1000, 'skip': [], 'censor': []}
    c.update(config)
    if not c['token'] or not c['chat']:
        raise Exception('[Telegram] missing parameter')
    # check key words
    if any((x in post['title']) for x in c['skip']):
        log.info('[Telegram] skip word hit')
        return
    if any((x in post['content']) for x in c['censor']):
        log.info('[Telegram] censor word hit')
        post['content'] = ''
    # fix email addresses and links
    post['content'] = re.sub(r'([!-~]+\@[!-~]+)', ' \\1 ', post['content'])
    post['content'] = re.sub(r'(https?://[!-~]+)', '\\1 ', post['content'])
    # form the message
    html = f'<b>{escape(post["title"])}</b>\n'
    html += f'{post["time"]} #{post["dept"]}\n'
    html += f'<a href="{post["linkLAN"]}">校内链接</a> <a href="{post["linkVPN"]}">VPN链接</a>\n\n'
    html += f'{escape(post["content"])}'
    if len(html) > c['maxlength']: html = html[:c['maxlength']] + '...'
    log.debug(f'[Telegram] html: {html}')
    # call the HTTP API
    while True:
        try:
            r = requests.post('https://api.telegram.org/bot'+c['token']+'/sendMessage', json={
                'chat_id': c['chat'],
                'text': html,
                'parse_mode': 'HTML',
                'disable_web_page_preview': True
            }, timeout=5)
            log.debug(f'[Telegram] response: {r.text}')
            if not r.json()['ok']:
                if r.json()['error_code'] == 429:
                    raise FloodException()
                else:
                    raise Exception(r.json()['error_code'])
            break
        except FloodException:
            log.warning('[Telegram] hit rate limit!')
            sleep(30)
        except requests.exceptions.RequestException as e:
            log.info('[Telegram] network error')
            log.info(repr(e))
            sleep(5)
        except Exception as e:
            log.error('[Telegram] unknown error')
            log.error(repr(e))
            sleep(60)
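
# Hypothetical usage sketch (token and chat id are placeholders):
#
#     config = {'token': '123456:ABCDEF', 'chat': '@my_channel', 'maxlength': 1000}
#     post = {'title': 'Notice', 'time': '2021-01-01 12:00', 'dept': 'office',
#             'linkLAN': 'http://inner.example/1', 'linkVPN': 'http://vpn.example/1',
#             'content': 'Full announcement text...'}
#     send(config, post)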
|
TechCiel/Reachee
|
sender/telegram.py
|
telegram.py
|
py
| 2,274 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "logging.info",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "html.escape",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "html.escape",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 65,
"usage_type": "call"
}
] |
26676860635
|
from flask import Flask, render_template, request, redirect
from flask_wtf import FlaskForm
from wtforms import StringField, HiddenField, RadioField
from wtforms.validators import Length, ValidationError
import phonenumbers
import random
import json
import os

app = Flask(__name__)
app.secret_key = 'Secret!'

hours = {"1-2": "1-2 часа", "3-5": "3-5 часов", "5-7": "5-7 часов", "7-10": "7-10 часов"}
days = {"mon": "Понедельник", "tue": "Вторник", "wed": "Среда", "thu": "Четверг", "fri": "Пятница", "sat": "Суббота", "sun": "Воскресенье"}
week = {'sun': 'sunday', 'mon': 'monday', 'tue': 'tuesday', 'wed': 'wednesday', 'thu': 'thursday', 'fri': 'friday', 'sat': 'saturday'}


def get_data():
    with open("data.txt", "r") as d:
        data = json.load(d)
    return data


def check_phone(form, field):
    number = form.clientPhone.data
    print(number)
    try:
        if not phonenumbers.is_valid_number(phonenumbers.parse(number, 'RU')):
            raise phonenumbers.NumberParseException(None, None)
    except phonenumbers.NumberParseException:
        raise ValidationError('Пожалуйста укажите номер телефона полностью (+7ХХХХХХХХХХ)')


def convert_day(day):
    for key, value in week.items():
        if value == day:
            return key


# this function adds a new goal
def add_goal(id_list, new_goal_eng, new_goal_ru, new_goal_pic):
    data = get_data()
    data['goals'].update({new_goal_eng: new_goal_ru})
    data['emodji'].update({new_goal_eng: new_goal_pic})
    for gid in id_list:
        if new_goal_eng not in data['teachers'][gid]['goals']:
            data['teachers'][gid]['goals'].append(new_goal_eng)
    out = {'goals': data['goals'], 'teachers': data['teachers'], 'emodji': data['emodji']}
    with open("data.txt", "w") as f:
        json.dump(out, f)


def add_callback(name, phone, goal, time):
    records = []
    if os.path.isfile('request.json'):
        with open('request.json', 'r') as r:
            records = json.load(r)
    records.append({'name': name, 'phone': phone, 'goal': goal, 'time': time})
    with open('request.json', 'w') as w:
        json.dump(records, w)


def add_record(name, phone, teacher_id, day, time):
    records = []
    if os.path.isfile('booking.json'):
        with open('booking.json', 'r') as r:
            records = json.load(r)
    records.append({'name': name, 'phone': phone, 'teacher': teacher_id, 'weekday': day, 'time': time})
    with open('booking.json', 'w') as w:
        json.dump(records, w)
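
# For reference, add_callback() and add_record() append JSON entries shaped
# like the dicts built above, e.g. (values illustrative):
#     {"name": "Ivan", "phone": "+79991234567", "teacher": 3, "weekday": "mon", "time": "12:00"}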
class RequestForm(FlaskForm):
    data = get_data()
    clientName = StringField('Вас зовут', [Length(min=2, message='Пожалуйста укажите ваше имя')])
    clientPhone = StringField('Ваш телефон', [check_phone])
    time = RadioField('Сколько времени есть?', choices=[(key, value) for key, value in hours.items()], default='1-2')
    goals = RadioField('Какая цель занятий?', choices=[(key, value) for key, value in data['goals'].items()], default='travel')


class BookingForm(FlaskForm):
    clientName = StringField('Вас зовут', [Length(min=2, message='Пожалуйста укажите ваше имя')])
    clientPhone = StringField('Ваш телефон', [check_phone])
    clientWeekday = HiddenField()
    clientTime = HiddenField()
    clientTeacher = HiddenField()


@app.route('/')
def main():
    # add_goal((8, 9, 10, 11), 'programming', 'Для программирования', '🖥')  # <- this is how a goal was added
    data = get_data()
    random_teachers_ids = []
    while len(random_teachers_ids) < 6:
        i = random.randint(0, len(data['teachers'])-1)
        if i not in random_teachers_ids:
            random_teachers_ids.append(i)
    return render_template('index.html', teachers=data['teachers'], ids=random_teachers_ids, pic=data['emodji'], goals=data['goals'])


@app.route('/all/')
def all_teachers():
    data = get_data()
    return render_template('index.html', teachers=data['teachers'], ids=[i for i in range(len(data['teachers']))], pic=data['emodji'], goals=data['goals'])


@app.route('/goals/<goal>/')
def show_goals(goal):
    data = get_data()
    sorted_list = []
    for teacher in data['teachers']:
        if goal in teacher['goals']:
            sorted_list.append(teacher)
    return render_template("goal.html", teachers=sorted_list, goals=data['goals'], goal=goal, pic=data['emodji'])


@app.route('/profiles/<int:teacher_id>/')
def show_profile(teacher_id):
    data = get_data()
    return render_template("profile.html", teacher=data['teachers'][teacher_id], goals=data['goals'], days=days, week=week)


@app.route('/request/')
def make_request():
    form = RequestForm()
    return render_template("request.html", form=form)


@app.route('/request_done/', methods=['POST', 'GET'])
def request_done():
    form = RequestForm()
    data = get_data()
    if request.method == 'POST':
        if form.validate_on_submit():
            name = form.clientName.data
            phone = form.clientPhone.data
            goal = form.goals.data
            time = form.time.data
            add_callback(name, phone, goal, time)
            return render_template("request_done.html", name=name, phone=phone, goal=data['goals'].get(goal), time=hours.get(time))
        else:
            return render_template("request.html", form=form)
    else:
        return render_template("request.html", form=form)


@app.route('/booking/<int:teacher_id>/<day>/<time>/')
def booking(teacher_id, day, time):
    data = get_data()
    what_day = convert_day(day)
    time = time + ":00"
    form = BookingForm(clientTime=time, clientWeekday=what_day, clientTeacher=teacher_id)
    return render_template("booking.html", teacher=data['teachers'][teacher_id], day=what_day, time=time, days=days, form=form)


@app.route('/booking_done/', methods=['POST', 'GET'])
def booking_save():
    data = get_data()
    form = BookingForm()
    if request.method == 'POST':
        name = form.clientName.data
        phone = form.clientPhone.data
        day = form.clientWeekday.data
        time = form.clientTime.data
        teacher_id = int(form.clientTeacher.data)
        if form.validate_on_submit():
            add_record(name, phone, teacher_id, day, time)
            return render_template("booking_done.html", name=name, phone=phone, day=days.get(day), time=time, teacher_id=teacher_id)
        else:
            return render_template("booking.html", teacher=data['teachers'][teacher_id], day=day, time=time, days=days, form=form)
    else:
        return redirect('/')


if __name__ == '__main__':
    app.run()
|
maksimKnz/flask-project2
|
app.py
|
app.py
|
py
| 6,791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "phonenumbers.is_valid_number",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "phonenumbers.parse",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "phonenumbers.NumberParseException",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "phonenumbers.NumberParseException",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "wtforms.validators.ValidationError",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Length",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "wtforms.StringField",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "wtforms.RadioField",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "wtforms.RadioField",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Length",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "wtforms.StringField",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "wtforms.HiddenField",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "wtforms.HiddenField",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "wtforms.HiddenField",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 171,
"usage_type": "call"
}
] |
7176555609
|
import functools
from typing import (
    Any,
    Callable,
    Optional,
    TypeVar,
    cast,
)
import warnings

TFunc = TypeVar("TFunc", bound=Callable[..., Any])


def deprecate_method(func: TFunc, message: Optional[str] = None) -> TFunc:
    @functools.wraps(func)
    def deprecated_func(*args: Any, **kwargs: Any) -> Any:
        warnings.warn(
            category=DeprecationWarning,
            message=(
                message
                or f"{func.__name__} is deprecated. "
                "A breaking change is expected in a future release."
            ),
            stacklevel=2,
        )
        # return the wrapped result so deprecated call sites keep working
        return func(*args, **kwargs)

    return cast(TFunc, deprecated_func)
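
# Hypothetical usage sketch (`old_api` is illustrative):
#
#     @deprecate_method
#     def old_api() -> None:
#         ...
#
#     old_api()  # emits a DeprecationWarning pointing at the caller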
|
ethereum/py-evm
|
eth/tools/_utils/deprecation.py
|
deprecation.py
|
py
| 662 |
python
|
en
|
code
| 2,109 |
github-code
|
6
|
[
{
"api_name": "typing.TypeVar",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "warnings.warn",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 27,
"usage_type": "call"
}
] |
70073356027
|
from asyncio import coroutine, get_event_loop
from time import time
import requests
import json


class IngestaoDadosDogs:
    def __init__(self, urls, dir):
        self.urls = urls
        self.dir = dir

    @coroutine
    def collect(self):
        start = time()
        loop = get_event_loop()
        scrape_list = [loop.run_in_executor(None, requests.get, url) for url in self.urls]
        for i, scrape in enumerate(scrape_list):
            resp = yield from scrape
            arquive = resp.json()
            print(f'{i+1}: {resp.ok}')
            # use the directory passed to the constructor, not the global
            with open(self.dir + f'{i+1}_File.json', 'w+') as file:
                json.dump(arquive, file)
        termino = time() - start
        print(f'Fim da Operação: {termino:.2f} s')


# API that returns random dog images from the internet!
url_list = ['https://dog.ceo/api/breeds/image/random' for n in range(0, 10)]
# Directory where the JSON files will be saved
dir = "C:/Users/BlueShift/Desktop/Gustavo/Gustavo/MentoramaPythonPRO/MentoramaPythonPRO_MOD9/mod9pro/JsonConcorrentes/"

Dados = IngestaoDadosDogs(url_list, dir).collect()
loop = get_event_loop()
loop.run_until_complete(Dados)
|
gustavo-duarte-silva/MentoramaPythonPRO_MOD9
|
mod9pro/ScriptsAssincronos/scriptConcorrente.py
|
scriptConcorrente.py
|
py
| 1,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "asyncio.coroutine",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 31,
"usage_type": "call"
}
] |
27773589780
|
import os
import asyncio

from telepyrobot.setclient import TelePyroBot
from pyrogram import filters
from pyrogram.types import Message
from telepyrobot import COMMAND_HAND_LER
from telepyrobot.utils.pyrohelpers import ReplyCheck

__PLUGIN__ = os.path.basename(__file__.replace(".py", ""))

__help__ = f"""
`{COMMAND_HAND_LER}sdmsg <message> | <time in seconds>`
The command will automatically destruct the message after the specified time.
"""


@TelePyroBot.on_message(filters.command("sdmsg", COMMAND_HAND_LER) & filters.me)
async def self_destruct(c: TelePyroBot, m: Message):
    input_str = m.text.split(None, 1)[1]
    rm = await m.edit_text("`Making self-destruct msg...`")
    ttl = 0
    if input_str:
        # the help text documents a `|` separator, so check for that
        if "|" in input_str:
            msg, ttl = input_str.split("|")
        else:
            await m.reply_text("__Check help to know how to use__")
            return
        sd_msg = await m.reply_text(f"{msg}", reply_to_message_id=ReplyCheck(m))
        await rm.delete()
        await asyncio.sleep(int(ttl))
        await sd_msg.delete()
    else:
        await m.edit_text("__Check help to know how to use__")
        return
|
Divkix/TelePyroBot
|
telepyrobot/plugins/self_destruct.py
|
self_destruct.py
|
py
| 1,145 |
python
|
en
|
code
| 40 |
github-code
|
6
|
[
{
"api_name": "os.path.basename",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "telepyrobot.utils.pyrohelpers.ReplyCheck",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot.on_message",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "telepyrobot.setclient.TelePyroBot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "pyrogram.filters",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.me",
"line_number": 18,
"usage_type": "attribute"
}
] |
32185442685
|
import logging
import flask
import time
import signal
import sys
import socket
from flask import Flask
from flask_api import status
from os import environ
from kubernetes import client, config

log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)

app = Flask(__name__)

try:
    config.load_incluster_config()
    v1 = client.CoreV1Api()
except Exception as e:
    log.exception("K8s config not loaded")


@app.route('/')
def hello():
    return f"Hello World!: {flask.request.remote_addr}, handler: {socket.gethostname()}"


@app.route('/health')
def health():
    log.info(f"health check from: {flask.request.remote_addr}")
    return "OK"


@app.route('/broken')
def sick():
    return "BAD", status.HTTP_404_NOT_FOUND


@app.route('/slow/<leng>')
def broken(leng):
    time.sleep(int(leng))
    return "slow", status.HTTP_200_OK


@app.route('/pods')
def k8sapi():
    # with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", 'r') as f:
    #     ns = f.readline()
    #     log.info(f"Reading PODs in ns: {ns}")
    return [i for i in v1.list_pod_for_all_namespaces(watch=False).items]


def sigterm_handler(signum, frame):
    log.info("SIGTERM received, stopping")
    sys.exit()


if __name__ == '__main__':
    if environ.get('FLASK_SETTINGS') is not None:
        log.info("Found flask config")
        app.config.from_envvar('FLASK_SETTINGS')
        log.info(f"config: {app.config}")
    signal.signal(signal.SIGTERM, sigterm_handler)
    app.run(host="0.0.0.0")
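
# Hypothetical smoke test against the endpoints above (Flask's default port
# 5000 is assumed, since app.run only sets the host):
#     curl http://localhost:5000/health   -> "OK"
#     curl http://localhost:5000/slow/3   -> "slow", after about 3 seconds
#     curl http://localhost:5000/broken   -> "BAD" with HTTP 404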
|
kiemlicz/util
|
dockerfiles/experiments/web.py
|
web.py
|
py
| 1,526 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "kubernetes.config.load_incluster_config",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "kubernetes.config",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "kubernetes.client.CoreV1Api",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kubernetes.client",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "socket.gethostname",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status.HTTP_404_NOT_FOUND",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask_api.status.HTTP_200_OK",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "signal.signal",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "signal.SIGTERM",
"line_number": 61,
"usage_type": "attribute"
}
] |
39620185683
|
# Imports and definitions needed by the code
import pygame
import random

WIDTH = 880
HEIGHT = 660

from config import GAME, QUIT


def init_screen(screen):
    # Clock used to adjust the game speed
    clock = pygame.time.Clock()

    # Load the start screen background
    background = pygame.image.load('Flying_Fox_Game/assets/img/tela de inicio final.png').convert()
    background = pygame.transform.scale(background, (WIDTH, HEIGHT))
    background_rect = background.get_rect()

    running = True
    while running:
        # Adjust the game speed.
        clock.tick(60)

        # Process events (mouse, keyboard, buttons, etc.).
        for event in pygame.event.get():
            # Check whether the window was closed.
            if event.type == pygame.QUIT:
                state = QUIT
                running = False
            if event.type == pygame.KEYUP:
                state = GAME
                running = False

        # On each loop, redraw the background and the sprites
        # screen.fill(BLACK)
        screen.blit(background, background_rect)

        # After drawing everything, flip the display.
        pygame.display.flip()

    return state
|
RodrigoAnciaes/Flying_Fox_game
|
Flying_Fox_Game/first_screen.py
|
first_screen.py
|
py
| 1,182 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.time.Clock",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "config.QUIT",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pygame.KEYUP",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "config.GAME",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pygame.display.flip",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 41,
"usage_type": "attribute"
}
] |
10422651353
|
from __future__ import annotations

import copy
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Self

from randovania.bitpacking import bitpacking
from randovania.bitpacking.bitpacking import BitPackDecoder, BitPackEnum, BitPackValue
from randovania.bitpacking.type_enforcement import DataclassPostInitTypeCheck
from randovania.game_description import default_database
from randovania.game_description.db.dock import DockType, DockWeakness
from randovania.games.game import RandovaniaGame
from randovania.lib import enum_lib

if TYPE_CHECKING:
    from collections.abc import Iterator

    from randovania.game_description.db.dock import DockWeaknessDatabase


class DockRandoMode(BitPackEnum, Enum):
    long_name: str
    description: str

    VANILLA = "vanilla"
    DOCKS = "docks"
    WEAKNESSES = "weaknesses"


enum_lib.add_long_name(
    DockRandoMode,
    {
        DockRandoMode.VANILLA: "Vanilla",
        DockRandoMode.DOCKS: "Doors",
        DockRandoMode.WEAKNESSES: "Types",
    },
)

enum_lib.add_per_enum_field(
    DockRandoMode,
    "description",
    {
        DockRandoMode.VANILLA: "Original door locks",
        DockRandoMode.DOCKS: "Randomize the type of each door individually",
        DockRandoMode.WEAKNESSES: "Randomizes all doors by type, turning all of one type into another",
    },
)


@dataclass(frozen=True)
class DockTypeState(BitPackValue, DataclassPostInitTypeCheck):
    game: RandovaniaGame
    dock_type_name: str
    can_change_from: set[DockWeakness]
    can_change_to: set[DockWeakness]

    @staticmethod
    def _get_weakness_database(game: RandovaniaGame) -> DockWeaknessDatabase:
        return default_database.game_description_for(game).dock_weakness_database

    @property
    def weakness_database(self) -> DockWeaknessDatabase:
        return self._get_weakness_database(self.game)

    @property
    def dock_type(self) -> DockType:
        return self.weakness_database.find_type(self.dock_type_name)

    @property
    def can_shuffle(self) -> bool:
        return len(self.can_change_from) > 0

    @property
    def as_json(self) -> dict:
        return {
            "can_change_from": sorted(weakness.name for weakness in self.can_change_from),
            "can_change_to": sorted(weakness.name for weakness in self.can_change_to),
        }

    @classmethod
    def from_json(cls, value: dict, game: RandovaniaGame, dock_type_name: str) -> DockTypeState:
        weakness_database = cls._get_weakness_database(game)
        return cls(
            game=game,
            dock_type_name=dock_type_name,
            can_change_from={
                weakness_database.get_by_weakness(dock_type_name, weakness) for weakness in value["can_change_from"]
            },
            can_change_to={
                weakness_database.get_by_weakness(dock_type_name, weakness) for weakness in value["can_change_to"]
            },
        )

    def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
        yield from bitpacking.pack_sorted_array_elements(
            sorted(self.can_change_from),
            sorted(self.possible_change_from),
        )
        yield from bitpacking.pack_sorted_array_elements(
            sorted(self.can_change_to),
            sorted(self.possible_change_to),
        )

    @classmethod
    def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> DockTypeState:
        reference: DockTypeState = metadata["reference"]
        ref_change_from = sorted(cls._possible_change_from(reference.game, reference.dock_type_name))
        ref_change_to = sorted(cls._possible_change_to(reference.game, reference.dock_type_name))
        return cls(
            game=reference.game,
            dock_type_name=reference.dock_type_name,
            can_change_from=set(bitpacking.decode_sorted_array_elements(decoder, ref_change_from)),
            can_change_to=set(bitpacking.decode_sorted_array_elements(decoder, ref_change_to)),
        )

    @staticmethod
    def _possible_change_from(game: RandovaniaGame, dock_type_name: str) -> Iterator[DockWeakness]:
        weakness_database = DockTypeState._get_weakness_database(game)
        yield from weakness_database.dock_rando_params[weakness_database.find_type(dock_type_name)].change_from

    @property
    def possible_change_from(self) -> Iterator[DockWeakness]:
        yield from self._possible_change_from(self.game, self.dock_type_name)

    @staticmethod
    def _possible_change_to(game: RandovaniaGame, dock_type_name: str) -> Iterator[DockWeakness]:
        weakness_database = DockTypeState._get_weakness_database(game)
        yield from weakness_database.dock_rando_params[weakness_database.find_type(dock_type_name)].change_to

    @property
    def possible_change_to(self) -> Iterator[DockWeakness]:
        yield from self._possible_change_to(self.game, self.dock_type_name)


@dataclass(frozen=True)
class DockRandoConfiguration(BitPackValue, DataclassPostInitTypeCheck):
    game: RandovaniaGame
    mode: DockRandoMode
    types_state: dict[DockType, DockTypeState]

    @staticmethod
    def _get_weakness_database(game: RandovaniaGame) -> DockWeaknessDatabase:
        return default_database.game_description_for(game).dock_weakness_database

    @property
    def weakness_database(self) -> DockWeaknessDatabase:
        return self._get_weakness_database(self.game)

    @property
    def as_json(self) -> dict:
        return {
            "mode": self.mode.value,
            "types_state": {
                dock_type.short_name: type_state.as_json for dock_type, type_state in self.types_state.items()
            },
        }

    @classmethod
    def from_json(cls, value: dict, game: RandovaniaGame) -> Self:
        weakness_database = cls._get_weakness_database(game)
        return cls(
            game=game,
            mode=DockRandoMode(value["mode"]),
            types_state={
                weakness_database.find_type(dock_type): DockTypeState.from_json(type_state, game, dock_type)
                for dock_type, type_state in value["types_state"].items()
            },
        )

    def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
        reference: DockRandoConfiguration = metadata["reference"]

        yield from self.mode.bit_pack_encode(None)

        modified_types = sorted(
            dock_type
            for dock_type, type_state in self.types_state.items()
            if type_state != reference.types_state[dock_type]
        )
        yield from bitpacking.pack_sorted_array_elements(modified_types, sorted(self.weakness_database.dock_types))
        for dock_type in modified_types:
            yield from self.types_state[dock_type].bit_pack_encode({"reference": reference.types_state[dock_type]})

    @classmethod
    def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> Self:
        reference: DockRandoConfiguration = metadata["reference"]

        mode = DockRandoMode.bit_pack_unpack(decoder, None)

        modified_types = bitpacking.decode_sorted_array_elements(
            decoder, sorted(reference.weakness_database.dock_types)
        )
        types_state = copy.copy(reference.types_state)
        for dock_type in modified_types:
            types_state[dock_type] = DockTypeState.bit_pack_unpack(
                decoder, {"reference": reference.types_state[dock_type]}
            )

        return cls(
            game=reference.game,
            mode=mode,
            types_state=types_state,
        )

    def is_enabled(self) -> bool:
        return self.mode != DockRandoMode.VANILLA

    def can_shuffle(self, dock_type: DockType) -> bool:
        return dock_type in self.weakness_database.dock_rando_params and self.types_state[dock_type].can_shuffle

    def settings_incompatible_with_multiworld(self) -> list[str]:
        danger = []
        if self.mode == DockRandoMode.DOCKS:
            danger.append(f"{self.mode.long_name}: {self.mode.description}")
        return danger
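
# Hypothetical usage sketch (the game value is illustrative; "types_state"
# must name dock types and weaknesses that actually exist for that game):
#
#     config = DockRandoConfiguration.from_json(
#         {"mode": "docks", "types_state": {...}},
#         RandovaniaGame.METROID_PRIME_ECHOES,
#     )
#     assert config.is_enabled()
#     round_trip = config.as_json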
|
randovania/randovania
|
randovania/layout/base/dock_rando_configuration.py
|
dock_rando_configuration.py
|
py
| 8,008 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.BitPackEnum",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "randovania.lib.enum_lib.add_long_name",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "randovania.lib.enum_lib",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "randovania.lib.enum_lib.add_per_enum_field",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "randovania.lib.enum_lib",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.BitPackValue",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.type_enforcement.DataclassPostInitTypeCheck",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeakness",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeakness",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.default_database.game_description_for",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.default_database",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeaknessDatabase",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeaknessDatabase",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockType",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.pack_sorted_array_elements",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.pack_sorted_array_elements",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.BitPackDecoder",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.decode_sorted_array_elements",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.decode_sorted_array_elements",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeakness",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeakness",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeakness",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeakness",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking.BitPackValue",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.type_enforcement.DataclassPostInitTypeCheck",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockType",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.default_database.game_description_for",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "randovania.game_description.default_database",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeaknessDatabase",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockWeaknessDatabase",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "randovania.games.game.RandovaniaGame",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "typing.Self",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.pack_sorted_array_elements",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.BitPackDecoder",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "randovania.bitpacking.bitpacking.decode_sorted_array_elements",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "randovania.bitpacking.bitpacking",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "typing.Self",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "randovania.game_description.db.dock.DockType",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 137,
"usage_type": "call"
}
] |
741344040
|
import os

from pytest_mock import MockFixture
from app.config import get_config, DevelopmentConfig, ProductionConfig, PRODUCTION, DEVELOPMENT


def test_get_config_in_development():
    app_config = get_config()

    assert app_config.env == DEVELOPMENT
    assert isinstance(app_config, DevelopmentConfig)


def test_get_config_in_production(mocker: MockFixture):
    mocked_environ = {
        'ENV': 'PROD',
        'API_URL': 'http://test.com',
        'API_EMAIL': '[email protected]',
        'API_PASSWORD': 'pass123',
        'BROKER_URI': 'broker.com',
        'BROKER_PORT': '1232'
    }
    mocker.patch.dict(os.environ, mocked_environ)

    app_config = get_config()

    assert app_config.env == PRODUCTION
    assert isinstance(app_config, ProductionConfig)
|
hrozan/utfpr-final-paper
|
legacy/smart-object/tests/test_config.py
|
test_config.py
|
py
| 771 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "app.config.get_config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "app.config.DEVELOPMENT",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "app.config.DevelopmentConfig",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "pytest_mock.MockFixture",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "app.config.get_config",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "app.config.PRODUCTION",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "app.config.ProductionConfig",
"line_number": 29,
"usage_type": "argument"
}
] |
34375411916
|
#!/usr/bin/env python3
from curses import wrapper
from functools import partial
from Gamepad import Gamepad
import curses
import time


def spd_scale(y0, x0, y1, x1, x):
    """Get the speed at the given value of time.
    NOTE: The output is not clamped,
    regardless of the reference points given."""
    # Two-point form of a line:
    # y - y0 = ((y1 - y0) / (x1 - x0)) * (x - x0)
    m = (y1 - y0) / (x1 - x0)
    c = y0
    return c + m * (x - x0)
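
# Worked example: with reference points (x0, y0) = (10, 100) and
# (x1, y1) = (0, 0), the line is y = 10 * x, so spd_scale(100, 10, 0, 0, 5)
# returns 50.0 and spd_scale(100, 10, 0, 0, 10) returns 100.0.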
def main(stdscr):
    """Main loop for listening to input events and displaying speed using curses.
    Takes in the window created by curses.
    Call this function using curses' wrapper function in order to
    not mess up terminal state on exceptions."""
    # Curses prelude
    curses.noecho()
    curses.cbreak()
    curses.curs_set(False)  # Don't show the cursor
    stdscr.keypad(True)
    stdscr.clear()
    stdscr.nodelay(True)  # Don't wait for ENTER to read input
    stdscr.border()

    spd_scale_c = partial(spd_scale, 100, 10, 0, 0)
    w_down_counter = 0
    speed = spd_scale_c(w_down_counter)

    stdscr.attrset(curses.color_pair(1))
    stdscr.addstr(0, curses.COLS // 2, "Motor Control", curses.A_DIM)

    if not Gamepad.available():
        print("Couldn't find a gamepad")
        while not Gamepad.available():
            time.sleep(1.0)
    gamepad = Gamepad.Xbox360()
    gamepad.startBackgroundUpdates()

    try:
        while gamepad.isConnected():
            val = gamepad.axis("RT")
            if gamepad.isPressed("A"):
                w_down_counter = min(w_down_counter + 1, 10)
            else:
                w_down_counter = max(w_down_counter - 1, 0)
            if gamepad.beenPressed("B"):
                break

            speed = spd_scale_c(w_down_counter)
            stdscr.addstr(
                4, curses.COLS // 2, f"Current speed is {speed}", curses.A_BOLD
            )
            stdscr.refresh()
            time.sleep(0.1)
    finally:
        gamepad.disconnect()


wrapper(main)
|
kknives/rudra-training
|
motor-control/js_control.py
|
js_control.py
|
py
| 2,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "curses.noecho",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "curses.cbreak",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "curses.curs_set",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "curses.color_pair",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "curses.COLS",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "curses.A_DIM",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "Gamepad.Gamepad.available",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "Gamepad.Gamepad",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "Gamepad.Gamepad.available",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "Gamepad.Gamepad",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "Gamepad.Gamepad.Xbox360",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "Gamepad.Gamepad",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "curses.COLS",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "curses.A_BOLD",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "curses.wrapper",
"line_number": 71,
"usage_type": "call"
}
] |
40880830024
|
from bs4 import BeautifulSoup
import requests, re, os
import textract

os.chdir("..")
DATA_DIR = "data/fiscal_pdf"
SAVE_DIR = "data/fiscal_txt"
pre_link = "http://www.imf.org"
target_dir = "http://www.imf.org/external/np/g20/"
g8_link = "http://www.g8.utoronto.ca/summit/index.htm"

# Extract all IMF Staff Notes from the IMF website
html = requests.get(target_dir)
soup = BeautifulSoup(html.content, "lxml")
link_imf = soup.findAll("a", text=re.compile('IMF Staff Note to G-20'))


def extract_pdf(r, link, datdir, savedir):
    name = link.split("/")[-1].split(".")[0][:6]
    name_format = "20" + name[-2:] + name[:2] + name[2:4]
    name_pdf = name_format + ".pdf"
    name_txt = name_format + ".txt"
    save_dir = os.path.join(datdir, name_pdf)
    save_txt = os.path.join(savedir, name_txt)
    with open(save_dir, 'wb') as f:
        f.write(r.content)
    text = textract.process(save_dir)
    with open(save_txt, 'wb') as f:
        f.write(text)


for i in link_imf:
    if "pdf" in i["href"]:
        if "www" in i["href"]:
            r = requests.get(i["href"], stream=True)
            extract_pdf(r, i["href"], DATA_DIR, SAVE_DIR)
        else:
            link_temp = pre_link + i["href"]
            r = requests.get(link_temp, stream=True)
            extract_pdf(r, i["href"], DATA_DIR, SAVE_DIR)
    else:
        if "www" in i["href"]:
            r = requests.get(i["href"], stream=True)
        else:
            link_temp = pre_link + i["href"]
            r = requests.get(link_temp, stream=True)
        temp = BeautifulSoup(r.content, "lxml")
        link_temp = temp.find(text=re.compile("Read the"))
        link_temp = link_temp.parent.a["href"]
        if "external" in link_temp:
            link_temp = pre_link + link_temp
        else:
            link_temp = target_dir + link_temp
        r = requests.get(link_temp, stream=True)
        extract_pdf(r, link_temp, DATA_DIR, SAVE_DIR)
|
utipe/imf_fiscal
|
code/corpus_extraction.py
|
corpus_extraction.py
|
py
| 1,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.chdir",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "textract.process",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 51,
"usage_type": "call"
}
] |
40449028487
|
from ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env import BaseEnv
from ray.rllib.policy import Policy
from typing import Dict
from ray.rllib.policy.sample_batch import SampleBatch
import numpy as np
import time
import csv
import os


def trainEvlogs2csv(folder_name, file_name, csvDict, n_episode):
    fieldnames = list(csvDict.keys())
    if n_episode == 1:
        # print('this happens')
        csvfile = open(folder_name + file_name, 'w', newline='')
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
    else:
        csvfile = open(folder_name + file_name, 'a', newline='')
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writerow(csvDict)


class MyCallbacks(DefaultCallbacks):
    def on_episode_start(self, *, worker: RolloutWorker, base_env: BaseEnv,
                         policies: Dict[str, Policy],
                         episode: MultiAgentEpisode, **kwargs):  # , env_index: int, **kwargs):
        # print("episode {} (env-idx) started.".format(episode.episode_id))
        episode.user_data["step"] = 0
        # episode.user_data["nb_communications_episode"] = 0
        # episode.user_data["collisions"] = 0
        # episode.user_data["list_communications"] = []
        # episode.user_data["comms_per_step"] = []
        # episode.user_data["histogram_communications"] = np.zeros(policies["policy_0"].config["horizon"]+1)  # np.zeros(101)
        episode.user_data["secs_per_episode"] = time.time()
        episode.user_data["mean_secs_per_ts"] = 0
        episode.user_data["auxiliary_time"] = time.time()
        episode.user_data["auxiliary_time_episode"] = time.time()

    def on_episode_step(self, *, worker: RolloutWorker, base_env: BaseEnv,
                        episode: MultiAgentEpisode, **kwargs):
        auxtime_ts = time.time() - episode.user_data["auxiliary_time"]
        episode.user_data["mean_secs_per_ts"] += auxtime_ts
        aux = episode.last_observation_for(0)
        aux12 = aux.reshape((1, aux.shape[0]))
        # aux2 = episode.last_action_for(0)
        # aux3 = episode.last_info_for(0)
        n_agents = worker.env.num_agents
        episode.batch_builder.count += n_agents - 1  ### THIS ONE IS THE ONE TO CHANGE!!
        # collisions = 0
        if episode.user_data["step"] != 0:
            for i in range(n_agents):
                aux4 = episode.policy_for(i)
                # episode.user_data["nb_communications_episode"] += episode.last_info_for(i)["communication"]
                # collisions += episode.last_info_for(i)["collisions"]
                # for ii in range(int(episode.last_info_for(i)["communication"])):
                #     episode.user_data["comms_per_step"].append(episode.user_data["step"])
                # episode.user_data["histogram_communications"][episode.user_data["step"]] += episode.last_info_for(i)["communication"]
            # episode.user_data["list_communications"].append(episode.user_data["nb_communications_episode"])
            # episode.user_data["collisions"] += collisions / 2
            # if episode.user_data["collisions"] != 0:
            #     print("COLLISION!")
        episode.user_data["step"] += 1
        # print(episode.user_data["step"])
        debug = False
        if debug:
            workerdebug = worker
            policy_index = worker.policies_to_train[0]
            policydebug = worker.policy_map[policy_index]
            wdebug = policydebug.get_weights()
            predicate = policydebug.loss_initialized()
            if predicate:
                # overwrite default VF prediction with the central VF
                # sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(
                #     sample_batch[SampleBatch.CUR_OBS], sample_batch[OPPONENT_OBS],
                #     sample_batch[OPPONENT_ACTION])
                t1 = time.time()
                # encoder_debug = policydebug.compute_encoding_layer(aux12)
                # print(encoder_debug)
                t1 = time.time() - t1
                t2 = time.time()
                action_debug = policydebug.compute_action(aux12)
                output_inputs = policydebug.output_inputs(aux12)
                t2 = time.time() - t2
                # print("WEIGHTS")
                # print(wdebug)
                # print("END WEIGHTS")
        episode.user_data["auxiliary_time"] = time.time()

    def on_episode_end(self, *, worker: RolloutWorker, base_env: BaseEnv,
                       policies: Dict[str, Policy], episode: MultiAgentEpisode, **kwargs):
        # horizon = policies["policy_0"].config["horizon"]
        # print("episode time:", time.time() - episode.user_data["auxiliary_time_episode"])
        policy_index = worker.policies_to_train[0]
        episode.custom_metrics["secs_per_episode"] = time.time() - episode.user_data["secs_per_episode"]
        # episode.custom_metrics["mean_secs_per_ts"] = episode.user_data["mean_secs_per_ts"] / policies["policy_0"].config["horizon"]  # 100.0
        episode.custom_metrics["mean_secs_per_ts"] = episode.user_data["mean_secs_per_ts"] / \
            policies[policy_index].config["horizon"]  # 100.0
        episode.custom_metrics["nb targets tracked"] = np.sum(worker.env.num_targets_tracked[0])
        episode.custom_metrics["episode length"] = worker.env.steps
episode.custom_metrics["success_rate"] = worker.env.success
episode.custom_metrics["entropy"] = worker.env.last_entropy_log
if worker.env.ntargets == 3:
episode.custom_metrics["nb_targets_tracked_3targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_3targets"] = worker.env.steps
if worker.env.ntargets == 6:
episode.custom_metrics["nb_targets_tracked_6targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_6targets"] = worker.env.steps
if worker.env.ntargets == 9:
episode.custom_metrics["nb_targets_tracked_9targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_9targets"] = worker.env.steps
if worker.env.ntargets == 12:
episode.custom_metrics["nb_targets_tracked_12targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_12targets"] = worker.env.steps
#episode.custom_metrics["nb_communications_episode"] = episode.user_data["nb_communications_episode"]
#episode.custom_metrics["collisions"] = episode.user_data["collisions"]
#episode.hist_data["communications_histogram"] = episode.user_data["list_communications"]
#episode.hist_data["communications_per_step"] = episode.user_data["comms_per_step"]
#if episode.user_data["collisions"] == 0:
# episode.custom_metrics["success"] = 1
#else:
# episode.custom_metrics["success"] = 0
#episode.custom_metrics["scn_"+str(base_env.envs[0]._env.world.current_scenario)+"_success"] = episode.custom_metrics["success"]
#episode.custom_metrics["scn_" + str(base_env.envs[0]._env.world.current_scenario) + "_nb_comm_episode"] = episode.custom_metrics["nb_communications_episode"]
#nEpisodesxScenario = worker.policy_config['train_batch_size']/(base_env.envs[0]._env.num_agents*policies["policy_0"].config["horizon"])
#if base_env.envs[0]._env.episode_id % nEpisodesxScenario == 0 and base_env.envs[0]._env.world.test == 1: # Alternatively use the in_evaluation value in config
#if base_env.envs[0]._env.world.time_step % (worker.policy_config['train_batch_size']/base_env.envs[0]._env.num_agents) == 0 and worker.policy_config["model"]["custom_model_config"]["training"]==False: # Alternatively use the in_evaluation value in config
#if base_env.envs[0]._env.world.time_step % (worker.policy_config['train_batch_size']) == 0 and worker.policy_config["model"]["custom_model_config"]["training"] == False: # Alternatively use the in_evaluation value in config
#base_env.envs[0]._env.world.next_eval_scenario()
#base_env.envs[0]._env.world.set_eval_scenario()
# training_eval_logs = base_env.envs[0]._env.world.test
# if worker.policy_config["model"]["custom_model_config"]["training"]==False and training_eval_logs == True:
# goal_achieved = 1
# for i in range(worker.env.num_agents):
# if episode.last_info_for(i)["goal_achieved"] == 0:
# goal_achieved = 0
#
# if episode.custom_metrics["success"] == 0:
# goal_achieved = 0
#
# timesteps = episode.last_info_for(0)["step"]
#
# checkpoint = policies["policy_0"].config["model"]["custom_model_config"]["checkpoint"]
# scenario = base_env.envs[0]._env.world.current_scenario
# folder_name = base_env.envs[0]._env.folder_name + 'training_eval/'
# file_name = 'checkpoint_' + str(checkpoint) + '.csv'
# n_episode = base_env.envs[0]._env.episode_id
# csvDict = {
# 'scenario': scenario,
# 'ep_safety': episode.custom_metrics["success"],
# 'ep_nb_communications': episode.custom_metrics["nb_communications_episode"],
# 'ep_goal_achieved': goal_achieved,
# 'step': timesteps
# }
# for i in range(policies["policy_0"].config["horizon"]+1): #101):
# csvDict[str(i)] = episode.user_data["histogram_communications"][i]
#
# if not os.path.isdir(folder_name):
# os.makedirs(folder_name)
# trainEvlogs2csv(folder_name,file_name,csvDict, n_episode)
#print("episode END")
#"""
def on_sample_end(self, *, worker: RolloutWorker, samples: SampleBatch,
**kwargs):
print("returned sample batch of size {}".format(samples.count))
#"""
def on_train_result(self, *, trainer, result: dict, **kwargs):
#print("trainer.train() result: {} -> {} episodes".format(trainer, result["episodes_this_iter"]))
work = trainer.workers.local_worker()
policy_index = work.policies_to_train[0]
policydebug = work.policy_map[policy_index]
wdebug = policydebug.get_weights()
#kernelweights = wdebug['default_policy/dense/kernel']
#if np.any(np.isnan(kernelweights)):
# print("here's a nan")
# if trainer.config["model"]["custom_model_config"]["training"]:
# if 313 <= trainer.iteration < 313 * 2: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.25, 0.75, 0]))
# if 313 * 2 <= trainer.iteration < 313 * 3: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.125, 0.125, 0.75]))
# if 313 * 3 <= trainer.iteration < 313 * 4: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.0, 0.25, 0.75]))
# if 313 * 4 <= trainer.iteration: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.0, 0.0, 1.0]))
# you can mutate the result dict to add new fields to return
result["callback_ok"] = True
"""
def on_postprocess_trajectory(
self, *, worker: RolloutWorker, episode: MultiAgentEpisode,
agent_id: str, policy_id: str, policies: Dict[str, Policy],
postprocessed_batch: SampleBatch,
original_batches: Dict[str, SampleBatch], **kwargs):
print("postprocessed {} steps".format(postprocessed_batch.count))
if "num_batches" not in episode.custom_metrics:
episode.custom_metrics["num_batches"] = 0
episode.custom_metrics["num_batches"] += 1
#"""
|
tud-amr/AC-LCP
|
utils/callbacks.py
|
callbacks.py
|
py
| 12,175 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "csv.DictWriter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "ray.rllib.agents.callbacks.DefaultCallbacks",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "ray.rllib.evaluation.RolloutWorker",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "ray.rllib.env.BaseEnv",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "ray.rllib.policy.Policy",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "ray.rllib.evaluation.MultiAgentEpisode",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "ray.rllib.evaluation.RolloutWorker",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "ray.rllib.env.BaseEnv",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "ray.rllib.evaluation.MultiAgentEpisode",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "ray.rllib.evaluation.RolloutWorker",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "ray.rllib.env.BaseEnv",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "ray.rllib.policy.Policy",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "ray.rllib.evaluation.MultiAgentEpisode",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "ray.rllib.evaluation.RolloutWorker",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "ray.rllib.policy.sample_batch.SampleBatch",
"line_number": 177,
"usage_type": "name"
}
] |
27241749881
|
import pygal
from random import randint
class Die:
def __init__(self, sides=6):
self.sides = sides
def roll(self):
return randint(1, self.sides)
die = Die(8)
die_1 = Die(8)
results = [die.roll() + die_1.roll() for x in range(1000)]
frequencies = [results.count(x) for x in range(2, 2 * die.sides + 1)]
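# Worked check on the bucket range: two 8-sided dice sum to anything between
# 2 and 16, so range(2, 2 * die.sides + 1) enumerates the 15 possible outcomes.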
# Build the pygal bar chart (labels match the simulation: two 8-sided dice)
hist = pygal.Bar()
hist.title = 'Rolling two D8 dice - 1000 rolls'
hist.x_labels = [str(x) for x in range(2, 2 * die.sides + 1)]
hist.x_title = 'Result'
hist.y_title = 'Frequency'
hist.add('2D8', frequencies)
hist.render_to_file('recap_die_2d8.svg')
|
mbrad26/Data-Vizualization
|
Recap/recap_die_2d8.py
|
recap_die_2d8.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygal.Bar",
"line_number": 22,
"usage_type": "call"
}
] |
39620196443
|
# ===== Initialization =====
# ----- Import and initialize packages
import pygame
import random
pygame.init()
# ----- Create the main window
WIDTH = 880
HEIGHT = 800
window = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Flying_Fox')
gravity = 1
difficult = 0
# ----- Load assets
METEOR_WIDTH = 100
METEOR_HEIGHT = random.randint(300, 450)
font = pygame.font.SysFont(None, 48)
background = pygame.image.load('Folder_de_Testes/assets/img/snow_day.jpeg').convert()
background = pygame.transform.scale(background, (WIDTH, HEIGHT))
meteor_img = pygame.image.load('Folder_de_Testes/assets/img/Tree.png').convert_alpha()
meteor_img = pygame.transform.scale(meteor_img, (METEOR_WIDTH, METEOR_HEIGHT))
# ----- Initialize data structures
# Define the new sprite types
class Fox(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
Fox_WIDTH = 70
Fox_HEIGHT = 70
Fox1 = pygame.image.load('Folder_de_Testes/assets/img/raposafinal.png').convert_alpha()
Fox1 = pygame.transform.scale(Fox1, (Fox_WIDTH, Fox_HEIGHT))
Fox2 = pygame.image.load('Folder_de_Testes/assets/img/snowflake.png').convert_alpha()
Fox2 = pygame.transform.scale(Fox2, (Fox_WIDTH, Fox_HEIGHT))
Fox3 = pygame.image.load('Folder_de_Testes/assets/img/Fox.jpeg').convert_alpha()
Fox3 = pygame.transform.scale(Fox3, (Fox_WIDTH, Fox_HEIGHT))
self.images = [Fox1,Fox2,Fox3]
self.image = Fox1
self.rect = self.image.get_rect()
self.rect.centerx = WIDTH / 4
self.rect.bottom = HEIGHT - 200
self.speedy = 1
self.now_on_windon = 0
def update(self):
self.rect.y += self.speedy
self.speedy += gravity + 0.1 * (-self.speedy)
self.now_on_windon = (self.now_on_windon + 1) % 3
self.image = self.images[self.now_on_windon]
self.mask = pygame.mask.from_surface(self.image)
        # Keep the fox inside the screen
        if self.rect.bottom > HEIGHT:
            pygame.QUIT  # note: a bare attribute access is a no-op; the fox is simply clamped
            self.rect.bottom = HEIGHT
            #game = False
        if self.rect.top < 0:
            pygame.QUIT  # no-op, see above
            self.rect.top = 0
def pulo(self):
self.speedy += -18
class Meteor(pygame.sprite.Sprite):
    def __init__(self, img):
        # Constructor of the parent class (Sprite).
        pygame.sprite.Sprite.__init__(self)
        METEOR_HEIGHT = random.randint(50, 250)  # local and unused
        self.image = img
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = (WIDTH-METEOR_WIDTH)
        self.rect.y = random.randint(10, HEIGHT)
        self.speedx = random.randint(-5, -3)
        METEOR_HEIGHT = random.randint(50, 250)  # local and unused
    def update(self):
        # Update the meteor's position
        self.rect.x += self.speedx
        # If the meteor moves past the edge of the screen, send it back and
        # draw new random positions and speeds
        if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
            self.rect.x = (WIDTH-METEOR_WIDTH)
            self.rect.y = random.randint(10, HEIGHT)
            self.speedx = random.randint(-5, -3)
game = True
# Clock for frame-rate control
clock = pygame.time.Clock()
FPS = 15
# Create the sprite groups
all_sprites = pygame.sprite.Group()
all_meteors = pygame.sprite.Group()
# Create the player
player = Fox()
all_sprites.add(player)
# Create the meteors
for i in range(2):
    meteor = Meteor(meteor_img)
    all_sprites.add(meteor)
    all_meteors.add(meteor)
# ===== Main loop =====
while game:
fpdif = FPS + difficult
print(fpdif)
clock.tick(fpdif)
#print(clock)
difficult += 0.01
    # ----- Handle events
    for event in pygame.event.get():
        # ----- Check consequences
        if event.type == pygame.QUIT:
            game = False
        # Check whether a key was pressed.
        # Check whether a key was released.
        if event.type == pygame.KEYUP:
            # Depending on the key, change the speed.
            if event.key == pygame.K_SPACE:
                player.pulo()
    # ----- Update game state
    # Update the positions of all sprites
    all_sprites.update()
    hits = pygame.sprite.spritecollide(player, all_meteors, True, pygame.sprite.collide_mask)
    if len(hits) > 0:
        game = False
    # ----- Render output
    window.fill((0, 0, 0))  # Fill with black
    window.blit(background, (0, 0))
    # Draw the sprites
    all_sprites.draw(window)
    pygame.display.update()  # Show the new frame to the player
# ===== Shutdown =====
pygame.quit()  # PyGame call that releases the resources in use
|
RodrigoAnciaes/Flying_Fox_game
|
Folder_de_Testes/Flappy.py
|
Flappy.py
|
py
| 4,902 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pygame.mask.from_surface",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pygame.mask",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYUP",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.spritecollide",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 162,
"usage_type": "call"
}
] |
72370696507
|
from django.urls import path
from products.views import (
CreateProduct,
CreateOption,
Products,
UpdateProduct,
Option,
UpdateOption,
)
urlpatterns = [
path("", Products.as_view()),
path("create/product", CreateProduct.as_view()),
path("update/product/<int:pk>", UpdateProduct.as_view()),
path("options", Option.as_view()),
path("create/option", CreateOption.as_view()),
path("update/option/<int:pk>", UpdateOption.as_view()),
]
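# Example (illustrative; assumes this urlconf is included under a /products/ prefix):
# a request to /products/update/product/3 resolves to UpdateProduct.as_view()(request, pk=3);
# the <int:pk> converter captures the primary key as an integer keyword argument.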
|
ohnas/Manager-backend
|
products/urls.py
|
urls.py
|
py
| 478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "products.views.Products.as_view",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "products.views.Products",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "products.views.CreateProduct.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "products.views.CreateProduct",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "products.views.UpdateProduct.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "products.views.UpdateProduct",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "products.views.Option.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "products.views.Option",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "products.views.CreateOption.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "products.views.CreateOption",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "products.views.UpdateOption.as_view",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "products.views.UpdateOption",
"line_number": 17,
"usage_type": "name"
}
] |
5759869864
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#%% Import the data (read the image)
img = mpimg.imread('../Data/indice.png')
plt.imshow(img)
#%% Reshape the image into a single table (pixels x channels)
d = img.shape
img_col = np.reshape(img, (d[0]*d[1], d[2]))
#%% Center the data to zero mean
media = img_col.mean(axis=0)
img_m = img_col - media
#%% Compute the covariance matrix
img_cov = np.cov(img_m, rowvar=False)
#%% Compute eigenvalues and eigenvectors
w, v = np.linalg.eig(img_cov)
#%% Analyze the principal components (fraction of variance per component)
porcentaje = w/np.sum(w)
#%% Compress the image by projecting onto the first principal component
componentes = w[0]
M_trans = np.reshape(v[:, 0], (4, 1))
img_new = np.matrix(img_m)*np.matrix(M_trans)
#%% Reconstruct the image and display it
img_recuperada = np.matrix(img_new)*np.matrix(M_trans.transpose())
img_recuperada = img_recuperada + media
img_r = img.copy()
for i in np.arange(4):
    img_r[:, :, i] = img_recuperada[:, i].reshape((d[0], d[1]))
plt.subplot(121)
plt.imshow(img)
plt.subplot(122)
plt.imshow(img_r)
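# A sketch (not in the original) of keeping k components instead of one; it
# assumes, as the code above does, that the leading eigenvector columns carry
# the largest eigenvalues (np.linalg.eig does not guarantee sorted output):
#
#     k = 2
#     M_k = v[:, :k]                       # (4, k) projection matrix
#     img_k = img_m @ M_k                  # compressed representation
#     img_back = img_k @ M_k.T + media     # reconstruction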
|
OscarFlores-IFi/CDINP19
|
code/p13.py
|
p13.py
|
py
| 1,039 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.image.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.image",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.reshape",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.cov",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.eig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
}
] |
40464221544
|
import os
import os.path
import re
import numpy as np
from PIL import Image
NUM_RE = re.compile(r'(\d+)')
maxint = 999999
WHITE_LIST_FORMATS = {'png', 'jpg', 'jpeg', 'bmp'}
def hstack_images(input_filenames, target_size=(224, 224)):
"""
    Horizontally stack all images from @input_filenames in order and return the combined image
"""
images = list(map(lambda i: i.resize(target_size), map(Image.open, input_filenames)))
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset, 0))
x_offset += im.size[0]
return new_im
def should_include_image(path, start_num, end_num):
"""Returns true if an image path should be included in the set, false
otherwise.
"""
fname = path.lower()
for extension in WHITE_LIST_FORMATS:
if fname.endswith('.' + extension):
num_match = NUM_RE.search(fname)
if num_match:
num, = num_match.groups()
num = int(num, 10)
return start_num <= num <= end_num
return False
def flow_from_directory(directory, a, b, c, target_size=(224, 224)):
    # Frames numbered a..b form the input strip, b+1..c the target strip (inclusive)
    for dirpath, dirnames, fnames in os.walk(directory):
if len(dirnames) == 0:
            # we are at a leaf directory (no subdirectories): extract the images in our range
xs = []
ys = []
for fname in fnames:
if should_include_image(fname, a, b):
xs.append(os.path.join(dirpath, fname))
elif should_include_image(fname, b + 1, c):
ys.append(os.path.join(dirpath, fname))
xs.sort()
ys.sort()
if not xs or not ys:
continue
x_imgs = np.asarray(hstack_images(xs, target_size=target_size))
y_imgs = np.asarray(hstack_images(ys, target_size=target_size))
yield (x_imgs, y_imgs)
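# Illustrative usage (the directory name and frame ranges are hypothetical):
# frames 1-8 of each leaf directory form the input strip, frames 9-16 the target.
#
#     for x_imgs, y_imgs in flow_from_directory('data/clips', 1, 8, 16):
#         print(x_imgs.shape, y_imgs.shape)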
|
eklitzke/dnn-fastai-project
|
vidextend/flow.py
|
flow.py
|
py
| 2,047 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.path.walk",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 69,
"usage_type": "call"
}
] |
8595213301
|
import openpyxl,os,shutil
import pandas as pd
import pymssql
from sqlalchemy import create_engine
backupPath='files-backup'
def get_xlsx_to_dataframe(filename):
    # openpyxl opens the workbook and first sheet; the values themselves are
    # read with pandas below, and the source file is then moved to the backup dir.
    wb = openpyxl.load_workbook(filename)
    sheets = wb.sheetnames
    ws = wb.get_sheet_by_name(sheets[0])
    df = pd.read_excel(filename)
    if not os.path.exists(backupPath):
        os.mkdir(backupPath)
    shutil.move(filename, os.path.join(backupPath, os.path.basename(filename)))
    return df
def dataframe_to_mssql(df, tablename):
engine = create_engine('mssql+pymssql://sa:[email protected]/database?tds_version=7.0')
df.to_sql(tablename, engine, if_exists='append', index=False)
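# Illustrative usage (file and table names are hypothetical):
#
#     df = get_xlsx_to_dataframe('report.xlsx')   # also moves the file into files-backup/
#     dataframe_to_mssql(df, 'report_table')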
|
zjz7304/xlsx_to_database
|
util.py
|
util.py
|
py
| 666 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "openpyxl.load_workbook",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 22,
"usage_type": "call"
}
] |
2687351802
|
import numpy as np
import matplotlib.pyplot as plt
import random as rnd
import turtle
N = 100
Nw = 1000
def wedrowniczek(length):  # "wedrowniczek" is Polish for "little wanderer": one 2D random walk
x0, y0 = 0,0
x,y = x0,y0
walkx,walky = [x],[y]
for i in range(length):
rand = rnd.randint(1,4)
if rand == 1:
x += 1
elif rand == 2:
y += 1
elif rand == 3:
x += -1
elif rand == 4:
y += -1
walkx.append(x)
walky.append(y)
return [walkx,walky]
lastPositions = []
for i in range(Nw):
walk = wedrowniczek(N)
lastPositions.append([walk[0][-1],walk[1][-1]])
plt.plot(walk[0],walk[1])
# Count how many walks ended back at the origin
lastPositionInStart = 0
for lastPos in lastPositions:
    if lastPos[0] == 0 and lastPos[1] == 0:
        lastPositionInStart += 1
print(lastPositionInStart)
plt.axis([-30,30,-30,30])
plt.grid()
plt.show()
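# Sanity check (standard result for the 2D simple random walk): the chance of
# being back at the origin after 2n steps is roughly 1/(pi*n). With N = 100
# (n = 50) that is about 0.0064, so around 6 of the Nw = 1000 walkers should
# finish at (0, 0).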
|
filipmalecki94/Computer_modeling
|
lista3/zadanie1.py
|
zadanie1.py
|
py
| 784 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
}
] |
29358868643
|
from sqlalchemy.orm import joinedload
from clld.db.util import get_distinct_values
from clld.db.models import common
from clld.web import datatables
from clld.web.datatables.base import LinkCol, Col, LinkToMapCol
from clld.web.datatables.parameter import Parameters
from clld.web.datatables.value import Values, ValueNameCol
from clld_glottologfamily_plugin.models import Family
from clld_glottologfamily_plugin.datatables import FamilyCol
from plansa import models
class Languages(datatables.Languages):
def base_query(self, query):
return query.join(Family).options(joinedload(models.Variety.family)).distinct()
def col_defs(self):
return [
LinkCol(self, 'name'),
FamilyCol(self, 'Family', models.Variety),
Col(self,
'latitude',
sDescription='<small>The geographic latitude</small>'),
Col(self,
'longitude',
sDescription='<small>The geographic longitude</small>'),
LinkToMapCol(self, 'm'),
]
class Taxa(Parameters):
def col_defs(self):
res = [
LinkCol(self, 'name'),
Col(self, 'english', model_col=models.Taxon.name_english),
]
for label, col in [
('kingdom', models.Taxon.kingdom),
('phylum', models.Taxon.phylum),
('class', models.Taxon.class_),
('order', models.Taxon.order),
('family', models.Taxon.family),
#('genus', models.Taxon.genus),
]:
res.append(Col(self, label, model_col=col, choices=get_distinct_values(col)))
return res
class Names(Values):
def col_defs(self):
if self.language:
return [
ValueNameCol(self, 'value'),
LinkCol(self,
'parameter',
sTitle='Scientific name',
model_col=common.Parameter.name,
get_object=lambda i: i.valueset.parameter),
Col(self,
'english',
model_col=models.Taxon.name_english,
get_object=lambda i: i.valueset.parameter),
Col(self,
'spanish',
model_col=models.Taxon.name_spanish,
get_object=lambda i: i.valueset.parameter),
Col(self,
'portuguese',
model_col=models.Taxon.name_portuguese,
get_object=lambda i: i.valueset.parameter),
]
return Values.col_defs(self)
def includeme(config):
config.register_datatable('values', Names)
config.register_datatable('parameters', Taxa)
config.register_datatable('languages', Languages)
|
tsammalex/plansa
|
plansa/datatables.py
|
datatables.py
|
py
| 2,802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "clld.web.datatables.Languages",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "clld.web.datatables",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "clld_glottologfamily_plugin.models.Family",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "plansa.models.Variety",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "clld_glottologfamily_plugin.datatables.FamilyCol",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "plansa.models.Variety",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkToMapCol",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.parameter.Parameters",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "clld.db.util.get_distinct_values",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.value.Values",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.value.ValueNameCol",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Parameter",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "plansa.models.Taxon",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "plansa.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.value.Values.col_defs",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.value.Values",
"line_number": 75,
"usage_type": "name"
}
] |
23248318495
|
import pygame
#necessary pygame initializing
pygame.init()
#create a surface that will be seen by the user
screen = pygame.display.set_mode((600, 400))
background = pygame.image.load('Background-1.png')
#create a variable for degrees of rotation
degree = 0
while True:
screen.blit(background, (0,0))
    #create shapes so you can tell rotation is happening
    #(loading the image once, before the loop, would avoid re-reading it every frame)
    ball = pygame.image.load('arrow.png')
##ORIGINAL UNCHANGED
#what coordinates will the static image be placed:
where = 200, 200
#draw surf to screen and catch the rect that blit returns
blittedRect = screen.blit(ball, where)
screen.blit(background, (0,0))
##ROTATED
#get center of surf for later
oldCenter = blittedRect.center
#rotate surf by DEGREE amount degrees
rotatedSurf = pygame.transform.rotate(ball, degree)
#get the rect of the rotated surf and set it's center to the oldCenter
rotRect = rotatedSurf.get_rect()
rotRect.center = oldCenter
#draw rotatedSurf with the corrected rect so it gets put in the proper spot
screen.blit(rotatedSurf, rotRect)
    degree -= 5
    if degree < -90:
        degree = 0
    #show the screen surface
    pygame.display.flip()
    pygame.time.wait(60)
    #service the OS event queue so the window stays responsive
    pygame.event.pump()
|
Soupupup/pythonsensorgame
|
rotation test 2.py
|
rotation test 2.py
|
py
| 1,287 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.rotate",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.wait",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 49,
"usage_type": "attribute"
}
] |
5564785730
|
from src.core.interfaces.model_abc import ModelAbstract
from src.core.interfaces.repository_abc import RepositoryAbstract
from src.shared import parse_config
from sqlalchemy.orm import Session
import logging
import logging.config
import dataclasses
from typing import List
from datetime import datetime
class SqlAlchemyRepository(RepositoryAbstract):
def __init__(self, session, object_type) -> None:
self.session: Session = session
self.object_type: ModelAbstract = object_type
logging.config.dictConfig(parse_config.get_logger_config())
self.logger = logging.getLogger("dev")
        self.logger.debug("{} repository instantiated".format(object_type.__name__))
    def get_by_filter(self, filter) -> ModelAbstract:
        """This function retrieves the objects matching the given filter dict.
        Return:
            The matching rows
        """
        self.logger.info("get_by_filter Request")
        query = self.session.query(self.object_type).filter_by(**filter)
        result = self.session.execute(query).fetchall()
        return result
def get_by_id(self, target_id: int) -> ModelAbstract:
"""This function retrieves an object from the database.
Return:
The object (ModelAbstract)
"""
self.logger.info("get_by_id Request")
result = self.session.query(self.object_type).get(target_id)
return result
def get_all(self) -> List[ModelAbstract]:
"""This function retrieves all the objects in the repository.
Returns:
A list of the objects (List[ModelAbstract])
"""
self.logger.info("get_all Request")
result = self.session.query(self.object_type).all()
return result
def delete_by_filter(self, filter) -> int:
"""This function deletes an object using the provided filter.
Returns:
Affected rows provided by the database engine (int)
"""
self.logger.info("delete_by_filter Request")
result = self.session.query(self.object_type).filter(filter).delete()
return result
def delete_by_id(self, target_id: int) -> int:
"""This function deletes an object by it's id.
Returns:
Affected rows provided by the database engine (int)
"""
self.logger.info("delete_by_id Request")
result = self.session.query(self.object_type).filter(self.object_type.id == target_id).delete()
self.session.commit()
return result
def insert(self, new: ModelAbstract) -> int:
"""This function inserts an object to the database.
Returns:
Inserted object id (int)
"""
self.logger.info("insert Request")
self.session.add(new)
self.session.commit()
return new.id
def update(self, values_to_update, requested_object) -> ModelAbstract:
self.logger.info("update Request")
for key, value in values_to_update:
if value is not None:
setattr(requested_object, key, value)
setattr(requested_object, "modified_at", datetime.now())
self.session.commit()
return requested_object
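# A minimal usage sketch (session setup is omitted; "User" is a hypothetical
# mapped model implementing ModelAbstract):
#
#     repo = SqlAlchemyRepository(session, User)
#     new_id = repo.insert(User(name="ada"))
#     user = repo.get_by_id(new_id)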
|
ricky-codes/APIGest
|
src/infrastructure/services/repository.py
|
repository.py
|
py
| 3,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "src.core.interfaces.repository_abc.RepositoryAbstract",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "src.core.interfaces.model_abc.ModelAbstract",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "logging.config.dictConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "src.shared.parse_config.get_logger_config",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "src.shared.parse_config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "src.core.interfaces.model_abc.ModelAbstract",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "src.core.interfaces.model_abc.ModelAbstract",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "src.core.interfaces.model_abc.ModelAbstract",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "src.core.interfaces.model_abc.ModelAbstract",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "src.core.interfaces.model_abc.ModelAbstract",
"line_number": 84,
"usage_type": "name"
}
] |
35522645807
|
import requests
from time import sleep
timeToWait = 300 # Time to wait between callouts (in seconds)
while True:
# Get list of commands to run this callout
URL = "https://slack.flemingcaleb.com:5000/api/agent/4/command/"
r = requests.get(url=URL)
if r.status_code == requests.codes.ok:
        # Process the list of commands
print(r)
elif r.status_code == requests.codes.not_found:
# No list this time
print("No list this time")
else:
#Handle Unintended Error
print("ERROR")
sleep(timeToWait)
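# Sketch of consuming the response body (assumes the endpoint returns JSON; the
# payload shape is hypothetical):
#
#     for command in r.json():
#         print("would run:", command)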
|
flemingcaleb/InfraBot
|
agent/agent.py
|
agent.py
|
py
| 567 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "requests.codes",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 21,
"usage_type": "call"
}
] |
38076886961
|
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
import pytest_mock
from werkzeug.exceptions import NotFound
from iceart.models import Artist, ArtistDto, ArtistViewModel
def test_artist_vm_init():
with pytest.raises(NotFound):
ArtistViewModel(-1)
def test_artist_vm_search_key():
assert ArtistViewModel(44).search_key() == {"_id": 44}
def test_artist_init():
# Arrange
data = {"_id": 44, "title": "t", "info": "i", "file": "f", "paintings": [5, 3, 77]}
# Act
model = Artist(data)
# Assert
assert model.identity == 44
assert model.title == "t"
assert model.info == "i"
assert model.file == "f"
assert model.paintings == [5, 3, 77]
def test_artist_dto_init():
# Arrange
data = {"_id": 44, "title": "t", "info": "i", "file": "f", "paintings": [5, 3, 77]}
model = Artist(data)
with NamedTemporaryFile("w+b", delete=False) as tmp_file:
tmp_file.write(b"\xab\xef")
p = Path(tmp_file.name)
# Act
with pytest_mock.mock.patch("pathlib.Path.joinpath", return_value=p):
dto = ArtistDto(model, {1: "img1", 2: "img2"})
# Assert
assert dto.image == "q+8="
assert dto.id == 44
assert dto.title == "t"
assert dto.info == "i"
assert dto.as_json() == {
"id": 44,
"title": "t",
"info": "i",
"image": "q+8=",
"paintings": {1: "img1", 2: "img2"},
}
assert dto.paintings == {1: "img1", 2: "img2"}
# Cleanup
os.remove(p)
|
JonSteinn/iceart_api
|
tests/test_models/test_artist.py
|
test_artist.py
|
py
| 1,537 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pytest.raises",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.NotFound",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "iceart.models.ArtistViewModel",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "iceart.models.ArtistViewModel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "iceart.models.Artist",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "iceart.models.Artist",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pytest_mock.mock.patch",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pytest_mock.mock",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "iceart.models.ArtistDto",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 63,
"usage_type": "call"
}
] |
75401661626
|
import numpy as np
import time
import torch
from multiprocessing import Array, Manager
from dgl.dataloading.dataloader import GraphCollator
from collections import deque, namedtuple
from gp.utils.datasets import DatasetWithCollate
"""
Following namedtuples make data collating and referencing easier
"""
GraphLabelNT = namedtuple("GraphLabelNT", ["g", "labels", "index"])
ReplayBatch = namedtuple(
"ReplayBatch",
["g", "plabel", "nlabel", "select_node", "move_node", "reward"],
)
GraphNMLabel = namedtuple("GraphNMLabel", ["g", "rlabel", "labels", "index"])
ReplayBatchHist = namedtuple(
"ReplayBatchHist",
[
"g",
"plabel",
"nlabel",
"select_node",
"move_node",
"reward",
"context",
],
)
ReplayBatchSM = namedtuple(
"ReplayBatchSM",
[
"g",
"plabel",
"nlabel",
"select_node",
"move_node",
"reward",
],
)
class GraphLabelDataset(DatasetWithCollate):
def __init__(self, graphs, labels, ind=None) -> None:
super().__init__()
self.graphs = graphs
self.labels = labels
if ind is None:
self.ind = np.arange(len(self.graphs))
else:
self.ind = ind
def __getitem__(self, index):
return GraphLabelNT(
self.graphs[index], np.array([self.labels[index]]), self.ind[index]
)
def __len__(self):
return len(self.graphs)
def get_collate_fn(self):
return GraphCollator().collate
class GraphNMDataset(DatasetWithCollate):
def __init__(self, graphs, labels, r_labels) -> None:
super().__init__()
self.graphs = graphs
self.labels = labels
self.rlabels = r_labels
def __getitem__(self, index):
return GraphNMLabel(
self.graphs[index],
np.array(self.rlabels[index]),
np.array([self.labels[index]]),
index,
)
def __len__(self):
return len(self.graphs)
def get_collate_fn(self):
return GraphCollator().collate
class GraphReplayBuffer:
def __init__(self, capacity):
# self.buffer = deque(maxlen=capacity)
self.capacity = capacity
self.buffer = np.empty(capacity, dtype=object)
self.size_count = 0
self.pointer = 0
def __len__(self):
return self.size_count
    def add(self, experience):
        if isinstance(experience, list):
            ct = len(experience)
            self.size_count += ct
            next_pointer = self.pointer + ct
            if next_pointer > self.capacity:
                # Wrap around: fill the tail of the ring, then continue from 0
                remainder = self.capacity - self.pointer
                self.buffer[self.pointer :] = experience[:remainder]
                self.pointer = 0
                next_pointer = next_pointer - self.capacity
                ct = next_pointer
                experience = experience[remainder:]
            self.buffer[self.pointer : next_pointer] = experience
            self.pointer = next_pointer
            self.size_count = min(self.size_count, self.capacity)
        else:
            # Single experience: write at the ring pointer and advance the
            # size/pointer bookkeeping, consistent with the list case above.
            self.buffer[self.pointer] = experience
            self.pointer = (self.pointer + 1) % self.capacity
            self.size_count = min(self.size_count + 1, self.capacity)
        # if (len(self) / self.capacity) > 1.3:
        #     self.buffer = self.buffer[-self.capacity :]
def sample(self, batch_size):
if len(self) < batch_size:
return None
indices = np.random.choice(len(self), batch_size, replace=True)
return [self.buffer[idx] for idx in indices]
    def reset(self):
        # Restore the ndarray-backed ring buffer (not a deque) and its counters
        self.buffer = np.empty(self.capacity, dtype=object)
        self.size_count = 0
        self.pointer = 0
class ReplayDataset(DatasetWithCollate):
def __init__(self, graphs, buffer, replay_size=1, replay_type=ReplayBatch):
super().__init__()
self.graphs = graphs
self.buffer = buffer
self.replay_size = replay_size
self.replay_type = replay_type
def __getitem__(self, index):
# print(len(self.buffer))
replay = self.buffer.sample(1)[0]
replay_graph = self.graphs[int(replay[0])]
return self.replay_type(replay_graph, *map(np.array, replay[1:]))
def __len__(self):
return len(self.graphs) * self.replay_size
def get_collate_fn(self):
return GraphCollator().collate
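# Minimal usage sketch (the replay tuples are illustrative; their layout follows
# the ReplayBatch fields above):
#
#     buf = GraphReplayBuffer(capacity=1000)
#     buf.add(experiences)     # experiences: a list of replay tuples
#     batch = buf.sample(32)   # returns None while fewer than 32 items are stored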
|
LechengKong/MAG-GNN
|
dataset.py
|
dataset.py
|
py
| 4,231 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "gp.utils.datasets.DatasetWithCollate",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "dgl.dataloading.dataloader.GraphCollator",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "gp.utils.datasets.DatasetWithCollate",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "dgl.dataloading.dataloader.GraphCollator",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "gp.utils.datasets.DatasetWithCollate",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "dgl.dataloading.dataloader.GraphCollator",
"line_number": 154,
"usage_type": "call"
}
] |
39756253957
|
"""
Plot a grid on H2
with Poincare Disk visualization.
"""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import geomstats.visualization as visualization
from geomstats.geometry.hyperbolic import Hyperbolic
H2 = Hyperbolic(dimension=2)
METRIC = H2.metric
def main(left=-128,
right=128,
bottom=-128,
top=128,
grid_size=32,
n_steps=512):
starts = []
ends = []
for p in np.linspace(left, right, grid_size):
starts.append(np.array([top, p]))
ends.append(np.array([bottom, p]))
for p in np.linspace(top, bottom, grid_size):
starts.append(np.array([p, left]))
ends.append(np.array([p, right]))
starts = [H2.intrinsic_to_extrinsic_coords(s) for s in starts]
ends = [H2.intrinsic_to_extrinsic_coords(e) for e in ends]
ax = plt.gca()
for start, end in zip(starts, ends):
geodesic = METRIC.geodesic(initial_point=start,
end_point=end)
t = np.linspace(0, 1, n_steps)
points_to_plot = geodesic(t)
visualization.plot(
points_to_plot, ax=ax, space='H2_poincare_disk', marker='.', s=1)
plt.show()
if __name__ == "__main__":
    if os.environ.get('GEOMSTATS_BACKEND', 'numpy') == 'tensorflow':
        logging.info('Examples with visualizations are only implemented '
                     'with numpy backend.\n'
                     'To change backend, write: '
                     'export GEOMSTATS_BACKEND=numpy.')
else:
main()
|
hhajri/geomstats
|
examples/plot_grid_h2.py
|
plot_grid_h2.py
|
py
| 1,552 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "geomstats.geometry.hyperbolic.Hyperbolic",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "geomstats.visualization.plot",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "geomstats.visualization",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 49,
"usage_type": "call"
}
] |
41843820390
|
import numpy as np
from sklearn.metrics import f1_score
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
class NaiveBayes:
"""
    Multi-variate Bernoulli Naive Bayes implementation in Python
"""
def __init__(self):
"""
        self.class_probability --> Class probability of shape (output_label, ). It indicates the probability of a label appearing
        without seeing the input data.
        self.phi --> Probability of an input feature given an output label. P(x|y_train). shape (output_label, input_feature)
        self.output_label --> Number of output classes
        self.input_feature --> Number of input features
"""
self.class_probability = None
self.phi = None
self.output_label = None
def fit(self, x_train, y_train):
"""
Train the model
        :param x_train: Input training examples of shape (number of training examples, input features)
        :param y_train: Output training labels of shape (number of training examples, )
:return:
"""
m = x_train.shape[0] # Number of training example
# Flatten the training set
x_train = x_train.reshape(m, -1)
input_features = x_train.shape[1]
self.output_label = len(np.unique(y_train.reshape(-1)))
# Initialize everything with zero
self.class_probability = np.zeros(self.output_label)
self.phi = np.zeros((self.output_label, input_features))
# Calculate class probability and phi
for label in range(self.output_label):
            # Extract the training data for an individual label
current_label_data = x_train[y_train == label]
            # Number of occurrences of this particular label in the training set
current_label_occur = current_label_data.shape[0]
            # Class probability of this label (with Laplace smoothing)
self.class_probability[label] = (current_label_occur + 1) / (m + self.output_label)
# Calculate phi for an individual label
            # How many times each of the input features appeared for this label
            # One is added for Laplace smoothing
input_feature_occur = np.sum(current_label_data, axis=0) + 1
            # Adjust the denominator for Laplace smoothing
curr_label_laplace_smoothing = current_label_occur + self.output_label
# Calculate phi
self.phi[label, :] = input_feature_occur / curr_label_laplace_smoothing
def predict(self, x_test):
"""
Make prediction
        :param x_test: data to predict of shape (number of predictions, input features)
:return:
"""
# Number of prediction
num_of_test = x_test.shape[0]
        # Probability of each class.
        # Initially each label has zero probability
probabilities = np.zeros((num_of_test, self.output_label))
# Calculate for all test
for test_index in range(num_of_test):
            # Compute the probability of each class
for label in range(self.output_label):
# First get all the words present in this test example
words_for_this_example = x_test[test_index] == 1
                # Get the estimated probabilities for this label and these words
words_probabilities = self.phi[label][words_for_this_example]
                # Multiply all these probabilities
words_probability_multiply = np.prod(words_probabilities)
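                # Note: multiplying many small probabilities can underflow;
                # summing log-probabilities is the usual remedy.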
                # Multiply by the class prior probability
# to get the overall probability of this example
probabilities[test_index, label] = words_probability_multiply * self.class_probability[label]
# Normalize the probabilities
probabilities[test_index] /= np.sum(probabilities[test_index])
# return the maximum probability index
return np.argmax(probabilities, axis=1)
def get_f1_score(self, x_test, y_test):
"""
Calculate the f1 score of our model
:param x_test:
:param y_test:
:return:
"""
return f1_score(y_test, self.predict(x_test))
if __name__ == '__main__':
    # Test our model with the Amazon cells review dataset,
    # where the model predicts whether a review is positive or negative.
# Read the dataset
with open("amazon_cells_labelled.txt", "r") as file:
# This will contain all the sentences
sentences = []
# This will contain all the output label(0, 1)
labels = []
for line in file.readlines():
            # The label and sentence are separated by a tab
line_arr = line.strip().split("\t")
            # Collect the sentence and its label (stop words are removed later by the vectorizer)
sentences.append(line_arr[0])
labels.append(int(line_arr[1]))
# Vectorize the training sentences
vectorizer = CountVectorizer(analyzer="word", lowercase=True, stop_words="english", max_features=4500)
data = vectorizer.fit_transform(sentences).toarray()
# Split data
x_train, x_test, y_train, y_test = train_test_split(data, np.array(labels))
# Fit to our model
naive_bayes = NaiveBayes()
naive_bayes.fit(x_train, y_train)
model_f1_score = naive_bayes.get_f1_score(x_test, y_test)
print("F1 score of test set of our model is : ", str(model_f1_score))
# Compare with scikit-learn model
sci_naive_bayes = BernoulliNB()
sci_naive_bayes.fit(x_train, y_train)
sk_prediction = sci_naive_bayes.predict(x_test)
print("F1 score of the test set of scikit learn model is : ", str(f1_score(y_test, sk_prediction)))
|
gmortuza/machine-learning-scratch
|
machine_learning/bayesian/naive_bayes/naive_bayes.py
|
naive_bayes.py
|
py
| 5,821 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "numpy.unique",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.BernoulliNB",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 128,
"usage_type": "call"
}
] |
19365158235
|
import pickle, ssl, logging
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl
import models, curves, utils1, utils, preresnet, resnet
DATASET = datasets.CIFAR10
TEST_ITEMS = 50_000
BATCH_SIZE = 128
WORKER_COUNT = 6
EPOCHS = 120
LEARNING_RATE=0.1
LR_GAMMA=0.1
L2_REG = 5e-4
MOMENTUM = 0.9
PARALLEL_MODELS = 1
MODEL = resnet.ResNet18 if DATASET == datasets.CIFAR10 else resnet.ResNet14MNIST
CURVE_BENDS = 3
CURVE_NUM_SAMPLES = 61
STEP_MODES = 2
STEP_BAD_MODES = 2
CODE_CHECK = False
if CODE_CHECK:
TEST_ITEMS = 5000
WORKER_COUNT = 0
EPOCHS = 1
CURVE_NUM_SAMPLES = 2
LOADER_ARGS = {"batch_size":BATCH_SIZE, "num_workers":WORKER_COUNT, "persistent_workers":WORKER_COUNT>0}
TRAINER_ARGS = {"accelerator":"gpu", "devices":"auto", "max_epochs":EPOCHS, "precision":16}
class LitModel(pl.LightningModule):
def __init__(self):
super().__init__()
if PARALLEL_MODELS > 1:
self.model = models.ParallelModel(PARALLEL_MODELS, MODEL, 10, **MODEL.kwargs)
else:
self.model = MODEL.base(10, **MODEL.kwargs)
self.loss = nn.CrossEntropyLoss()
self.mode = None
def forward(self, x):
return self.model(x)
def process_batch(self, batch):
x, y = batch
if PARALLEL_MODELS > 1:
y = torch.cat([y for _ in range(PARALLEL_MODELS)], dim=0)
return self(x), y
def training_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y)
self.log("train_loss", loss.item(), on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y)
correct = (y_hat.argmax(1) == y).type(torch.float).sum().item()
acc = 100*correct / y.size(dim=0)
self.log("val_loss", loss)
self.log("val_acc", acc)
def test_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y)
correct = (y_hat.argmax(1) == y).type(torch.float).sum().item()
acc = 100*correct / y.size(dim=0)
self.log("test_loss", loss)
self.log("test_acc", acc)
def configure_optimizers(self):
optimiser = torch.optim.SGD(self.parameters(), lr=LEARNING_RATE*PARALLEL_MODELS, momentum=MOMENTUM, weight_decay=L2_REG)
scheduler_dict = {
"scheduler": torch.optim.lr_scheduler.StepLR(optimiser, step_size=max(1,EPOCHS//3), gamma=LR_GAMMA),
"interval": "epoch"
}
return {"optimizer":optimiser, "lr_scheduler":scheduler_dict}
class LitModelConnect(pl.LightningModule):
def __init__(self, start_model = None, end_model = None, num_bends=CURVE_BENDS):
super().__init__()
self.loss = nn.CrossEntropyLoss()
self.model = curves.CurveNet(10, curves.PolyChain, MODEL.curve, num_bends, architecture_kwargs=MODEL.kwargs)
self.t = None
self.update_bn = False
# Initialise curve weights
if start_model != None:
self.model.import_base_parameters(start_model.model, 0)
self.model.import_base_parameters(end_model.model, num_bends - 1)
self.model.init_linear()
def forward(self, x, **kwargs):
return self.model(x, **kwargs)
def regulariser(self):
return 0.5 * L2_REG * self.model.l2
def process_batch(self, batch):
x, y = batch
return self(x, t=self.t), y
def set_t(self, t):
self.t = t
def training_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y) + self.regulariser()
self.log("train_loss", loss.item(), on_epoch=True)
return loss
def on_test_start(self):
if self.update_bn:
self.model.train()
def test_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
nll = self.loss(y_hat, y)
loss = nll + self.regulariser()
correct = (y_hat.argmax(1) == y).type(torch.float).sum().item()
acc = 100*correct / y.size(dim=0)
self.log("test_nll", nll)
self.log("test_loss", loss)
self.log("test_acc", acc)
def configure_optimizers(self):
optimiser = torch.optim.SGD(
filter(lambda param: param.requires_grad, self.parameters()),
lr=LEARNING_RATE,
momentum=MOMENTUM
)
scheduler_dict = {
"scheduler": torch.optim.lr_scheduler.StepLR(optimiser, step_size=max(1,EPOCHS//3), gamma=LR_GAMMA),
"interval": "epoch"
}
return {"optimizer":optimiser, "lr_scheduler":scheduler_dict}
def update_bn(model, loader):
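    # Recompute BatchNorm running statistics by streaming the loader through
    # the model in train mode; trainer.test performs no gradient updates.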
if not utils.check_bn(model): return
bn_trainer = pl.Trainer(logger=False,**TRAINER_ARGS)
model.update_bn = True
bn_trainer.test(model, loader, verbose=False)
model.update_bn = False
def testCurve(model, trainer, test_loader, train_loader=None):
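    # Evaluate the connecting curve at CURVE_NUM_SAMPLES values of t in [0, 1]
    # and return the worst (maximum) test loss found along the path.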
ts = np.linspace(0.0, 1.0, CURVE_NUM_SAMPLES)
if train_loader == None:
train_loader = test_loader
    # BN has momentum, so iterate a few times to warm up
model.set_t(0.0)
for _ in range(3):
update_bn(model, train_loader)
# Test and compute max stats
max_loss = -1
for t in ts:
model.set_t(t)
update_bn(model, train_loader)
metrics = trainer.test(model, test_loader)
max_loss = max(max_loss, metrics[0]["test_loss"])
return max_loss
if __name__ == "__main__":
# Select training device
use_cuda = torch.cuda.is_available()
if use_cuda:
LOADER_ARGS["pin_memory"] = True
device = "cuda" if use_cuda else "cpu"
print(f"Training on {device}")
# Setup logging
logging.getLogger("lightning").setLevel(logging.ERROR)
def genLogger(log, path):
return pl.loggers.CSVLogger(path, name=log)
# Setup checkpointing
checkpoint = pl.callbacks.ModelCheckpoint(
#dirpath="checkpoints",
#save_last=True,
save_top_k=0,
save_weights_only=True
)
# Setup progress bar
progress_bar = pl.callbacks.RichProgressBar()
TRAINER_ARGS["callbacks"] = [checkpoint, progress_bar]
# ------------------------
# Prepare Datasets / Loaders
# ------------------------
# CIFAR10 has an expired cert
ssl._create_default_https_context = ssl._create_unverified_context
# Too many files open with file_descriptor strategy
torch.multiprocessing.set_sharing_strategy('file_system')
transform = []
# if DATASET == datasets.CIFAR10:
# transform += [transforms.Grayscale()]
transform += [
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]
transform = transforms.Compose(transform)
train_data = DATASET(
root=".data",
train=True,
download=True,
transform=transform
)
# Truncate data for quick checks
if TEST_ITEMS < 50_000:
train_data = torch.utils.data.Subset(train_data, range(TEST_ITEMS))
adverse_data = utils1.NoiseDataset(train_data)
test_data = DATASET(
root=".data",
train=False,
download=True,
transform=transform
)
# train_data = utils1.GPUDataset(train_data, "cuda")
# adverse_data = utils1.GPUDataset(adverse_data, "cuda")
# test_data = utils1.GPUDataset(test_data, "cuda")
train_loader = DataLoader(train_data, shuffle=True, **LOADER_ARGS)
adverse_loader = DataLoader(adverse_data, shuffle=True, **LOADER_ARGS)
LOADER_ARGS["batch_size"] = 1024
test_loader = DataLoader(test_data, **LOADER_ARGS)
curve_loader = DataLoader(train_data, **LOADER_ARGS)
# ------------------------
# Generate Data
# ------------------------
# Load state
state = {"minima":[], "paths":[]}
try:
state_file = open('state.p', 'rb')
state = pickle.load(state_file)
state_file.close()
except FileNotFoundError:
pass
# ------------------------
# Generate Minima
# ------------------------
# Generate good minima
good_minima = []
for _ in range(STEP_MODES):
model = LitModel()
model.mode = "train"
path=f"modes/{len(state['minima'])}"
trainer = pl.Trainer(logger=genLogger("train", path),**TRAINER_ARGS)
trainer.fit(model, train_loader, test_loader)
metrics = trainer.test(model, test_loader)[0]
trainer.save_checkpoint(path+"/model.ckpt")
record = {"idx":len(state["minima"]), "type":"good", "path":path, "loss":metrics["test_loss"], "acc":metrics["test_acc"]}
good_minima.append(record)
state["minima"].append(record)
# Generate adversarial init
model = LitModel()
model.mode = "adverse"
trainer = pl.Trainer(logger=genLogger("adverse", "modes"), **TRAINER_ARGS)
trainer.fit(model, adverse_loader)
trainer.save_checkpoint("modes/adverse.ckpt")
# Generate bad minima
bad_minima = []
for _ in range(STEP_BAD_MODES):
model = LitModel.load_from_checkpoint("modes/adverse.ckpt")
model.mode = "train"
path=f"modes/{len(state['minima'])}"
trainer = pl.Trainer(logger=genLogger("train", path), **TRAINER_ARGS)
trainer.fit(model, train_loader, test_loader)
metrics = trainer.test(model, test_loader)[0]
trainer.save_checkpoint(path+"/model.ckpt")
record = {"idx":len(state["minima"]), "type":"bad", "path":path, "loss":metrics["test_loss"], "acc":metrics["test_acc"]}
bad_minima.append(record)
state["minima"].append(record)
# ------------------------
# Generate Curves
# ------------------------
new_curves = []
# Connect good minima to existing
for mode in good_minima:
if mode["idx"] == 0: continue
other = np.random.randint(mode["idx"])
while state["minima"][other]["type"] != "good":
other = np.random.randint(mode["idx"])
new_curves.append((mode["idx"], other))
# Connect a bad minimum to a good minimum
other = np.random.randint(bad_minima[0]["idx"])
while state["minima"][other]["type"] != "good":
other = np.random.randint(bad_minima[0]["idx"])
new_curves.append((bad_minima[0]["idx"], other))
# Curve directly between bad minima
new_curves.append((bad_minima[0]["idx"], bad_minima[1]["idx"]))
# Random new curves
start = np.random.randint(len(state["minima"]))
other = np.random.randint(len(state["minima"]))
while start == other:
other = np.random.randint(len(state["minima"]))
new_curves.append((start, other))
start = np.random.randint(len(state["minima"]))
other = np.random.randint(len(state["minima"]))
while start == other:
other = np.random.randint(len(state["minima"]))
new_curves.append((start, other))
for start_idx, end_idx in new_curves:
start = LitModel.load_from_checkpoint(state["minima"][start_idx]["path"] + "/model.ckpt")
end = LitModel.load_from_checkpoint(state["minima"][end_idx]["path"] + "/model.ckpt")
path=f"curves/{len(state['paths'])}"
linear = LitModelConnect(start, end, 2)
trainer = pl.Trainer(logger=genLogger("linear", path), **TRAINER_ARGS)
lin_loss = testCurve(linear, trainer, curve_loader, test_loader)
trainer.save_checkpoint(path + "/linear.ckpt")
curve = LitModelConnect(start, end, CURVE_BENDS)
trainer = pl.Trainer(logger=genLogger("curve", path), **TRAINER_ARGS)
trainer.fit(curve, train_loader)
trainer.save_checkpoint(path + "/curve.ckpt")
curve_loss = testCurve(curve, trainer, curve_loader, test_loader)
curve_test_loss = testCurve(curve, trainer, test_loader, test_loader)
record = {"idx":len(state["paths"]), "path":path, "start":start_idx, "end":end_idx, "lin_loss":lin_loss, "curve_loss":curve_loss, "curve_test_loss":curve_test_loss}
state["paths"].append(record)
# Save state
pickle.dump(state, open('state.p', 'wb'))
|
jonasjuerss/mode-connectivity
|
james/main.py
|
main.py
|
py
| 12,280 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "resnet.ResNet18",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "resnet.ResNet14MNIST",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "models.ParallelModel",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.float",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "curves.CurveNet",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "curves.PolyChain",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "torch.float",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "utils.check_bn",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.loggers.CSVLogger",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.loggers",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.callbacks.ModelCheckpoint",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.callbacks",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.callbacks.RichProgressBar",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.callbacks",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_default_https_context",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "torch.multiprocessing.set_sharing_strategy",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Subset",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "utils1.NoiseDataset",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 383,
"usage_type": "call"
}
] |
33205173219
|
from django.urls import path
from .views import (get_user, add_user, get_categories, get_recipe,
add_to_favourite, get_user_favourites,
get_recipes_in_category, get_random_recipe,
add_to_dislikes)
urlpatterns = [
path('users/<int:telegram_id>', get_user),
path('users/add/', add_user),
path('categories/', get_categories),
path('category/recipes/', get_recipes_in_category),
path('recipe/random/', get_random_recipe),
path('favourites/recipe/', get_recipe),
path('favourites/', get_user_favourites),
path('favourites/add', add_to_favourite),
path('dislikes/add', add_to_dislikes)
]
|
AlexanderZharyuk/recipes
|
recipes_admin_api/api/urls.py
|
urls.py
|
py
| 684 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.get_user",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.add_user",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.get_categories",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.get_recipes_in_category",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.get_random_recipe",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.get_recipe",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "views.get_user_favourites",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "views.add_to_favourite",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.add_to_dislikes",
"line_number": 22,
"usage_type": "argument"
}
] |
74056199547
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .activations import ACTIVATIONS
class Embedding(nn.Module):
'''
Abstract base class for any module that embeds a collection of N vertices into
N hidden states
'''
def __init__(self, features, hidden, **kwargs):
super().__init__()
self.features = features
self.hidden = hidden
def forward(self, x):
pass
class Constant(Embedding):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, x):
return x
class OneLayer(Embedding):
def __init__(self, features, hidden, act=None, wn=False, **kwargs):
super().__init__(features, hidden)
self.fc = nn.Linear(features, hidden)
if wn:
self.fc = nn.utils.weight_norm(self.fc, name='weight')
self.activation = ACTIVATIONS[act]()
def forward(self, x):
return self.activation(self.fc(x))
class TwoLayer(Embedding):
def __init__(self, features, hidden, act=None, wn=False, **kwargs):
super().__init__(features, hidden)
self.e1 = OneLayer(features, hidden, act, wn)
self.e2 = OneLayer(hidden, hidden, act, wn)
def forward(self, x):
        return self.e2(self.e1(x))  # embed the input first (e1), then apply the hidden layer (e2)
class NLayer(nn.Module):
def __init__(self, dim_in=None, dim_out=None, n_layers=None, dim_hidden=None, act=None, wn=False, **kwargs):
super().__init__()
self.activation = ACTIVATIONS[act]()
if dim_hidden is None:
dim_hidden = dim_out
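        # Layer widths: dim_in, then (n_layers - 1) hidden widths, then dim_out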
dims = [dim_in] + [dim_hidden] * (n_layers-1) + [dim_out]
self.fcs = nn.ModuleList()
for i in range(len(dims)-1):
fc = nn.Linear(dims[i], dims[i+1])
if wn:
fc = nn.utils.weight_norm(fc, name='weight')
self.fcs.append(fc)
def forward(self, x):
for fc in self.fcs:
x = fc(x)
x = self.activation(x)
return x
EMBEDDINGS = dict(
n=NLayer,
one=OneLayer,
two=TwoLayer,
const=Constant,
)
|
isaachenrion/jets
|
src/architectures/embedding/embedding.py
|
embedding.py
|
py
| 2,081 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.weight_norm",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "activations.ACTIVATIONS",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "activations.ACTIVATIONS",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.weight_norm",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
}
] |
31146266095
|
import tensorflow as tf
import keras.backend as K
def huber_loss(y_true, y_pred):
return tf.losses.huber_loss(y_true, y_pred)
def adjust_binary_cross_entropy(y_true, y_pred):
return K.binary_crossentropy(y_true, K.pow(y_pred, 2))
def MMD_Loss_func(num_source, sigmas=None):
if sigmas is None:
sigmas = [1, 5, 10]
def loss(y_true, y_pred):
cost = []
for i in range(num_source):
for j in range(num_source):
domain_i = tf.where(tf.equal(y_true, i))[:, 0]
domain_j = tf.where(tf.equal(y_true, j))[:, 0]
single_res = mmd_two_distribution(K.gather(y_pred, domain_i),
K.gather(y_pred, domain_j),
sigmas=sigmas)
cost.append(single_res)
cost = K.concatenate(cost)
return K.mean(cost)
return loss
def mmd_two_distribution(source, target, sigmas):
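    # Squared MMD with a multi-bandwidth RBF kernel:
    # MMD^2 = E[k(x, x')] + E[k(y, y')] - 2 * E[k(x, y)]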
sigmas = K.constant(sigmas)
xy = rbf_kernel(source, target, sigmas)
xx = rbf_kernel(source, source, sigmas)
yy = rbf_kernel(target, target, sigmas)
return xx + yy - 2 * xy
def rbf_kernel(x, y, sigmas):
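    # Average RBF kernel value over all sample pairs and all bandwidths
    # (beta = 1 / (2 * sigma) for each sigma in `sigmas`)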
beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))
dist = compute_pairwise_distances(x, y)
dot = -K.dot(beta, K.reshape(dist, (1, -1)))
exp = K.exp(dot)
return K.mean(exp, keepdims=True)
def compute_pairwise_distances(x, y):
norm = lambda x: K.sum(K.square(x), axis=1)
return norm(K.expand_dims(x, 2) - K.transpose(y))
|
rs-dl/MMD-DRCN
|
customLoss.py
|
customLoss.py
|
py
| 1,575 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "tensorflow.losses.huber_loss",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.losses",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "keras.backend.binary_crossentropy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "keras.backend.pow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.where",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.where",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.equal",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.backend.gather",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "keras.backend.gather",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "keras.backend.concatenate",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "keras.backend.mean",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "keras.backend.constant",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "keras.backend.dot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "keras.backend.reshape",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.backend.exp",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "keras.backend.mean",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "keras.backend.sum",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "keras.backend.square",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keras.backend.expand_dims",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "keras.backend.transpose",
"line_number": 53,
"usage_type": "call"
}
] |
34228298510
|
import os
import re
import sys
import glob
import builtins
from contextlib import contextmanager
import setuptools
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.install import install as _install
with open("README.md", "r") as fh:
long_description = fh.read()
PACKAGENAME = "aliad"
VERSIONFILE = f"{PACKAGENAME}/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(
    name=PACKAGENAME,
version=verstr,
author="Alkaid Cheng",
author_email="[email protected]",
description="A library for anomaly detection.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
package_data={PACKAGENAME: []},
exclude_package_data={PACKAGENAME: []},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=[
'numpy',
'pandas',
'matplotlib',
'click',
'quickstats'
],
scripts=[f'bin/{PACKAGENAME}'],
python_requires='>=3.8',
)
|
AlkaidCheng/aliad
|
setup.py
|
setup.py
|
py
| 1,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.search",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.M",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 34,
"usage_type": "call"
}
] |
13058259505
|
from datetime import datetime, timezone
from config import Config
from logger import logger
from model.coordinates import Coordinates
from model.media_type import MediaType
from model.ts_source import TsSource
class MediaFile:
original_path: str
original_filename: str
filename: str
media_type: MediaType
dir_path: list[str]
unix_time_sec: int = None
timestamp: datetime = None
ts_source: TsSource = None
mtime: datetime = None
index: int = None
coordinates: Coordinates = None
md5: str = None
target_dir = None
target_filename = None
target = None
def __init__(self, original_path: str, original_filename: str, filename: str, media_type: MediaType,
dir_path: list[str]):
self.original_path = original_path
self.original_filename = original_filename
self.filename = filename
self.media_type = media_type
self.dir_path = dir_path
def __repr__(self):
if self.ts_source == TsSource.EXIF:
sym = Config.SYM_CHECK
else:
sym = Config.SYM_MULTIPLICATION
return f'{sym} {self.ts_source} {self.dir_path}/{self.original_filename}, ts={self.timestamp}'
def __lt__(self, other):
return self.original_filename.lower() < other.original_filename.lower()
def has_timestamp(self) -> bool:
return self.timestamp is not None
def update_time(self, ts: datetime, source: TsSource, force: bool = False):
if ts is not None and (force or self.timestamp is None or ts < self.timestamp):
self.timestamp = ts.astimezone(timezone.utc)
self.unix_time_sec = int(self.timestamp.timestamp())
self.ts_source = source
if self.ts_source == TsSource.MTIME:
self.mtime = ts
def update_coordinates(self, coordinates: Coordinates):
if coordinates is not None:
self.coordinates = coordinates
@staticmethod
def create(source_dir: str, file_dir: str) -> 'MediaFile':
original_filename, filename, media_type, split_path = MediaFile.retrieve_filename_data(source_dir, file_dir)
return MediaFile(
original_path=file_dir,
original_filename=original_filename,
filename=filename,
media_type=media_type,
dir_path=split_path
)
@staticmethod
def retrieve_filename_data(source_dir: str, file_dir: str):
if not source_dir.endswith('/'):
source_dir += '/'
split_path = file_dir.replace(source_dir, '').split('/')
original_filename = split_path.pop()
fixed_filename = MediaFile.replace_dots(original_filename)
filename, extension = fixed_filename.split('.')
media_type = MediaType.from_string(extension)
return original_filename, filename, media_type, split_path
@staticmethod
def replace_dots(original_filename: str) -> str:
result = original_filename
while result.count('.') > 1:
result = result.replace('.', ' ', 1)
return result
|
mbogner/imagination
|
model/media_file.py
|
media_file.py
|
py
| 3,104 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "model.media_type.MediaType",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "model.ts_source.TsSource",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "model.coordinates.Coordinates",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "model.media_type.MediaType",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "model.ts_source.TsSource.EXIF",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "model.ts_source.TsSource",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "config.Config.SYM_CHECK",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "config.Config.SYM_MULTIPLICATION",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "model.ts_source.TsSource",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "model.ts_source.TsSource.MTIME",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "model.ts_source.TsSource",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "model.coordinates.Coordinates",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "model.media_type.MediaType.from_string",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "model.media_type.MediaType",
"line_number": 84,
"usage_type": "name"
}
] |
3081251304
|
# -*- coding: utf-8 -*-
"""Graphics
.. module:: graphics
:synopsis: Module for creating graphs
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import resoncalc.output as output
from math import ceil
from csv import DictReader
# globals
n_points = 1000 # count of points in graph
n_rows = 20.0 # count of rows in graph legend
def plot_fem_base_function(func, a, b):
"""Plot base function
Args:
func (func): base function
a (float): left boundary of interval
b (float): right boundary of interval
"""
# coordinates init
x = np.linspace(np.real(a), np.real(b), n_points)
n = len(x)
y = np.zeros(n, dtype=float)
# calculate function values
for i in range(n):
y[i] = np.real(func(x[i]))
# create graph
plt.plot(x, y)
plt.title('FEM base function')
plt.tight_layout()
plt.show()
def plot_fem_base(funcs, a, b):
"""Plot base functions
Args:
funcs (list): base functions
a (float): left boundary of interval
b (float): right boundary of interval
"""
# coordinate init
x = np.linspace(np.real(a), np.real(b), n_points)
n = len(x)
# function loop
for i in range(len(funcs)):
# coordinate init
y = np.zeros(n, dtype=float)
# bridging function
if (type(funcs[i]) is list):
for j in range(n):
y[j] = np.real(funcs[i][0](x[j]) + funcs[i][1](x[j]))
# function within element
else:
for j in range(n):
y[j] = np.real(funcs[i](x[j]))
# plot function
plt.plot(x, y, label=i+2)
# create graph
plt.title('FEM base functions')
plt.legend(bbox_to_anchor=(1, 1), borderaxespad=0.0, ncol=ceil(len(funcs)/n_rows))
plt.tight_layout()
plt.show()
def plot_potential(potential, a, b, *params):
"""Plot potential
Args:
potential (func): potential function
a (float): left boundary of interval
b (float): right boundary of interval
params (args): potential specific parameters
"""
# coordinates init
x = np.linspace(np.real(a), np.real(b), n_points)
n = len(x)
y = np.zeros(n, dtype=float)
# plot potential
for i in range(n):
y[i] = potential(x[i], *params)
plt.plot(x, y)
# create graph
plt.title('Potential')
plt.xlabel('x')
plt.ylabel('E')
plt.grid()
plt.tight_layout()
plt.show()
def plot_eigenstates(potential, eigenvalues, a, b, emax, fname='', *params):
"""Plot eigenstates for given potential
Args:
potential (func): potential function
eigenvalues (list): bound states
a (float): left boundary of interval
b (float): right boundary of interval
emax (float): maximum energy in atomic units
fname (str): export filename
params (args): potential specific parameters
"""
# coordinates init
eigenvalues = np.real(eigenvalues)
x = np.linspace(a, b, n_points)
n = len(x)
y = np.zeros(n, dtype=float)
# plot potential
for i in range(n):
y[i] = potential(x[i], *params)
plt.plot(x, y)
# plot eigenstates
for i in range(len(eigenvalues)):
z = np.zeros(n, dtype=float)
val = eigenvalues[i]
        bound_state = val < 0.0
cross = 0
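        # 'cross' counts crossings of the eigenvalue line with the potential;
        # the segment is only drawn between the appropriate crossings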
# line segment representing eigenstate
for j in range(n):
# intersection points with potential
if (val > y[j]):
if (cross == 0 or (cross == 2 and not bound_state)):
cross += 1
elif (cross == 1):
cross += 1
# plot line
if ((bound_state and cross == 1) or (not bound_state and cross == 2)):
z[j] = val
else:
z[j] = np.nan
plt.plot(x, z, 'r', label='E{0} = {1:e}'.format(i+1, val))
# create graph
plt.xlim(left=a, right=b)
plt.ylim(bottom=None, top=emax)
plt.title('Eigenstates')
plt.xlabel('x')
plt.ylabel('E')
plt.grid()
plt.legend(bbox_to_anchor=(1, 1), borderaxespad=0.0, ncol=ceil(len(eigenvalues)/n_rows))
plt.tight_layout()
# export graph
if (len(fname) > 0):
plt.savefig(fname)
plt.close()
# display graph
else:
plt.show()
def plot_complex_spectrum(eigenvalues, eigenvalues2=[], states=None, fname=''):
"""Plot complex spectrum used in ECS method
Args:
        eigenvalues (list): eigenvalues for first angle of rotation
eigenvalues2 (list): eigenvalues for second angle of rotation, default empty
states (list): highlighted eigenstates
fname (str): export filename
"""
# plot first eigenvalues
x = []
y = []
if (states is None):
x = np.real(eigenvalues)
y = np.imag(eigenvalues)
else:
limit = np.abs(states[0])
for val in eigenvalues:
if (np.real(val) <= limit):
x.append(np.real(val))
y.append(np.imag(val))
plt.plot(x, y, 'b.')
# plot second eigenvalues
x = []
y = []
if (len(eigenvalues2) > 0):
if (states is None):
x = np.real(eigenvalues2)
y = np.imag(eigenvalues2)
else:
limit = np.abs(states[0])
for val in eigenvalues2:
if (np.real(val) <= limit):
x.append(np.real(val))
y.append(np.imag(val))
plt.plot(x, y, 'g.')
# highlight eigenstates
if (states is not None):
for state in states:
plt.plot(np.real(state), np.imag(state), 'ro', label='real={0:e}, imag={1:e}'.format(np.real(state), np.imag(state)))
plt.legend(loc='lower left')
# create graph
plt.title('Complex spectrum')
plt.xlabel('Re')
plt.ylabel('Im')
plt.grid()
# export graph
if (len(fname) > 0):
plt.savefig(fname)
plt.close()
# display graph
else:
plt.show()
def plot_resonances_complex(infile, outfile=''):
"""Plot resonances in complex plane
Args:
infile (str): input filename
outfile (str): output filename, default empty
"""
# parse csv file
with open(infile, 'r') as f:
reader = DictReader(f)
states = {}
# get resonance states, group by param2
for row in reader:
if (row['type'] == 'resonance'):
param2 = row['param2']
if (not param2 in states):
states[param2] = []
states[param2].append(float(row['real']) + 1j*float(row['imag']))
# plot state trajectories
for k, v in states.items():
plt.plot(np.real(v), np.imag(v), 'b-')
# create graph
plt.title('Resonance states')
plt.xlabel('Re')
plt.ylabel('Im')
plt.grid()
# export graph
if (len(outfile) > 0):
plt.savefig(outfile)
plt.close()
# display graph
else:
plt.show()
def plot_resonances_params(infile, energy=True, outfile=''):
"""Plot resonances according to parameters
Args:
infile (str): input filename
energy (bool): energy or width, default energy
outfile (str): output filename, default empty
"""
# parse csv file
with open(infile, 'r') as f:
reader = DictReader(f)
param1, param2, value = [], [], []
# get resonance states
for row in reader:
if (row['type'] == 'resonance'):
param1.append(float(row['param1']))
param2.append(float(row['param2']))
value.append(float(row['real']) if (energy) else -0.5 * float(row['imag']))
# create graph
plt.scatter(param1, param2, c=value, cmap=plt.cm.rainbow)
plt.colorbar()
plt.title('Resonance states {0}'.format('energy' if (energy) else 'width'))
plt.xlabel('param a')
plt.ylabel('param b')
plt.grid()
# export graph
if (len(outfile) > 0):
plt.savefig(outfile)
plt.close()
# display graph
else:
plt.show()
|
hydratk/resoncalc
|
src/resoncalc/graphics.py
|
graphics.py
|
py
| 8,384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.linspace",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "numpy.real",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "numpy.real",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "numpy.real",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "numpy.real",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "numpy.real",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 324,
"usage_type": "name"
}
] |
18339770466
|
import requests
import json
url = "https://api.telegram.org/bot5653233459:AAHWejZRnvy4luWTetBSbQY5jTzS11mA35U/sendMessage"
photo_url = "https://api.telegram.org/bot5653233459:AAHWejZRnvy4luWTetBSbQY5jTzS11mA35U/sendPhoto"
document_url = "https://api.telegram.org/bot5653233459:AAHWejZRnvy4luWTetBSbQY5jTzS11mA35U/sendDocument"
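# Each helper below posts one Bot API payload (photo, document, or message) with an
# inline keyboard to the given chat_id; the bot token is embedded in the URLs above.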
def buttons(chat_id):
payload = {
"photo": "https://blog.mint.com/wp-content/uploads/2013/02/1.jpg",
#"caption": caption,
"chat_id": chat_id,
"reply_markup": {
"inline_keyboard": [
[
{
"text": "Show Plans",
"callback_data": "Insurance Plan"
}
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(photo_url, json=payload, headers=headers)
def button_1(chat_id):
payload = {
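# "photo" here appears to be a Telegram file_id from a previous upload, not a URL.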
"photo": "AgACAgUAAxkBAAIGv2Mhrr21NIyhQKZHdVnObrNS0_SdAAIStTEb3U4IVSXi2GZ7s3TqAQADAgADcwADKQQ",
"chat_id": chat_id,
# "text": "Select one option",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "Term Life",
"callback_data": "Term Life"
},
{
"text": "mediclaim",
"callback_data": "Mediclaim"
},
{
"text": "Accidental",
"callback_data": "Accidental Insurance"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(photo_url, json=payload, headers=headers)
print(response)
def term_life(chat_id):
payload = {
"chat_id": chat_id,
'document': 'BQACAgUAAxkBAAIClGMIdmbOEsi8xsAic-Bk0UahnIl5AAJCBgACDBJBVBLbNwoHbsUhKQQ',
#"text": "Choose Your Plan",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "25Lac",
"callback_data": "25Lac"
},
{
"text": "50Lac",
"callback_data": "50Lac"
},
{
"text": "75Lac",
"callback_data": "75Lac"
},
{
"text": "1Cr",
"callback_data": "1r"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(document_url, json=payload, headers=headers)
def mediclaim(chat_id):
payload = {
"chat_id": chat_id,
#"text": "Choose Your Plan",
"document":"BQACAgUAAxkBAAICmmMIeJtMVL1v2JTsRhsdMb4uSDvCAAJEBgACDBJBVKwG4QUSEWW7KQQ",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "2Lac",
"callback_data": "mediclaim up to 2Lac"
},
{
"text": "5Lac",
"callback_data": "mediclaim up to 5Lac"
},
{
"text": "10Lac",
"callback_data": "mediclaim up to 10Lac"
},
{
"text": "25Lac",
"callback_data": "mediclaim up to 25Lac"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(document_url, json=payload, headers=headers)
def accidental(chat_id):
payload = {
"chat_id": chat_id,
#"text": "Choose Your Plan",
"document":"BQACAgUAAxkBAAICmGMIeJMERoZF4atIdkk_L-cYGESUAAJDBgACDBJBVCJlGZUyjyGkKQQ",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "5Lac",
"callback_data": "Plan of 5Lac"
},
{
"text": "10Lac",
"callback_data": "Plan of 10Lac"
},
{
"text": "25Lac",
"callback_data": "Plan of 25Lac"
},
{
"text": "50Lac",
"callback_data": "Plan of 50Lac"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(document_url, json=payload, headers=headers)
def existing_user(chat_id):
payload = {
"chat_id": chat_id,
"text": "Seems you are an existing user! To see your details click here ",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "Show Details",
"callback_data": "show details"
}
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(url, json=payload, headers=headers)
#print(response.text)
# import requests
# url = "https://api.telegram.org/bot5340261920:AAF2GGInKosubny7ox-CWeZyl8IMESgQg5o/sendPhoto"
# payload = {
# "photo": "AgACAgUAAxkBAAMLYvDs4xqN8GzQGUY555yHLr5joacAAjGxMRtMw4hX56puCadfo3cBAAMCAAN5AAMpBA",
# # "caption": "Optional",
# # "disable_notification": False,
# "reply_to_message_id": 0,
# "chat_id":1091996976
# }
# headers = {
# "Accept": "application/json",
# "User-Agent": "Telegram Bot SDK - (https://github.com/irazasyed/telegram-bot-sdk)",
# "Content-Type": "application/json"
# }
# response = requests.post(url, json=payload)
# print(response.text)
|
mayuritoro/tele_bot
|
tele_bot/trial.py
|
trial.py
|
py
| 5,973 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 196,
"usage_type": "call"
}
] |
14255581243
|
from selenium import webdriver
import time, re
from bs4 import BeautifulSoup
import pyautogui
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
import pyperclip
import os
# Main job: visit the detail page of every 300MIUM video, then download each cover image
class Crawl_51luxu:
def main(self, Dir='F:\\pic\\', page=1, category='300MIUM'):
current_path = os.getcwd().replace('\\', '/') + '/'
# custom_path = 'F:\\pic\\300MIUM\\'
custom_path = Dir + category + "\\"
chrome_opts = webdriver.ChromeOptions()
chrome_opts.add_argument("--headless")
chrome_opts.add_experimental_option(
'excludeSwitches', ['enable-logging'])
url = 'https://www.51luxu.com/category/sresource/' + category + '/page/' + str(page)
def open_browser(url):
driver = webdriver.Chrome(options=chrome_opts)
driver.get(url)
return driver
def scrapy(driver):
if not os.path.exists(Dir):
os.mkdir(Dir)
if not os.path.exists(custom_path):
os.mkdir(custom_path)
Exist = []
if os.path.exists(custom_path + 'history.txt'):
with open(custom_path + 'history.txt','r+') as f:
lines = f.readlines()
for line in lines:
Exist.append(line.replace("\n",""))
f.close()
# Read the names of previously downloaded images from history so we don't download duplicates.
# This matters because while filtering I keep the good images and delete the bad ones,
# so the directory listing alone gets out of sync; history.txt records which IDs were already fetched.
for page in range(1,100):
try:
content = driver.page_source.encode('utf-8')
soup = BeautifulSoup(content, 'lxml')
img = soup.find_all('img')
src1 = re.findall(r'src=".*?"', str(img))
name1 = re.findall(r'alt=".*?"', str(img))
src2 = []
name2 = []
for i in src1:
src2.append(i.split('=')[1].replace("\"",""))
for i in name1:
name2.append(i.split('=')[1].replace("\"", ""))
if category == "Scute":
pattern = "S-cute"
else:
pattern = category
try:
temp = [x.replace("inggo.info", "paypp.xyz") for x in src2]
src3 = [x for x in temp if 'images.paypp.xyz/wp-content/uploads' in x]
except:
src3 = [x for x in src2 if 'images.paypp.xyz/wp-content/uploads' in x]
name3 = [x for x in name2 if pattern in x]
if len(name3) < 12:
name3 = name2
# name3 and src3 now hold the IDs from the listing page and the matching detail-page links
# Next, launch a second browser to grab the screenshot from each detail page
driver1 = webdriver.Chrome(options=chrome_opts)
for i in range(len(src3)):
try:
if '[' not in name3[i] and '【' not in name3[i]:
title = name3[i]
else:
title = name3[i].split('【')[1].split('】')[0] # shorten the ID name a bit
except:
title = name3[i].split('[')[1].split(']')[0]
if i >= 1:
try:
if name3[i].split('[')[1].split(']')[0] == name3[i-1].split('[')[1].split(']')[0]:
title = name3[i].split(']')[1].replace("[","")
except:
pass
if i >= 1:
try:
if name3[i].split('【')[1].split('】')[0] == name3[i-1].split('【')[1].split('】')[0]:
title = name3[i].split('】')[1].replace("【","")
except:
pass
if title in Exist:
print("%s 已经下载!" % (title))
continue
# The duplicate check mentioned above; if it was already downloaded, skip the rest
# Open the detail page for this link
driver1.get(src3[i])
img = driver1.find_element_by_xpath("//html/body/img")
img.screenshot(custom_path + title + '.jpg')
# wait = WebDriverWait(driver1, 10)  # wait for the browser to respond; optional
# pyautogui.rightClick(x=500, y=500)  # right-click the image; adjust coordinates for your screen
# pyautogui.typewrite(['V'])  # "Save as" shortcut is V
# time.sleep(2)  # give the machine time to respond
# pyperclip.copy(custom_path + title + '.jpg')  # copy file name plus path to the clipboard
# time.sleep(1)
# pyautogui.hotkey('ctrlleft', 'V')  # paste
# time.sleep(1)
# pyautogui.press('enter')  # confirm
# time.sleep(1)
while True:
filelist = os.listdir(custom_path)
if title + '.jpg' in filelist:
with open(custom_path + 'history.txt', 'a+') as f:
f.writelines(title)
f.writelines('\n')
f.close()
print("%s 下载完成!" % (title))
break
else:
print("等待响应")
time.sleep(2)
# pyautogui.hotkey('ctrlleft', 'V') # 粘贴
# time.sleep(1)
# pyautogui.press('enter') # 确认
# time.sleep(1)
# 在txt中加入当前下载的图片名字
print("%s 下载完成!"%(title))
time.sleep(0.5)
driver1.quit()
print("第 %d 页爬完"%(page))
button = "//*[@class='next page-numbers']" #翻页按钮
driver.find_elements_by_xpath(button)[0].click()
except:
print("第 %d 页出错!"%(page))
driver1.quit()
try:
button = "//*[@class='next page-numbers']" #翻页按钮
driver.find_elements_by_xpath(button)[0].click()
except:
print("爬取完毕!")
break
continue
driver = open_browser(url)
time.sleep(2)
scrapy(driver)
|
ExcaliburEX/GHS
|
Crawl_51luxu.py
|
Crawl_51luxu.py
|
py
| 7,503 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 142,
"usage_type": "call"
}
] |
7261153491
|
import cv2
import numpy as np
img = cv2.imread('bookpage.jpg')
grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#retval, threshold = cv2.threshold(grayscaled, 11, 255 , cv2.THRESH_BINARY)
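# Adaptive threshold: each pixel is compared to a Gaussian-weighted mean of its
# 115x115 neighbourhood minus the constant 1, which copes with unevenly lit book
# pages better than a single global cutoff.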
threshold = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
median = cv2.medianBlur(threshold,3)
gaus = cv2.GaussianBlur(threshold,(5,5),0)
cv2.imshow('original',img)
cv2.imshow('gray',grayscaled)
cv2.imshow('threshold',threshold)
cv2.imshow('median',median)
cv2.imshow('gaus',gaus)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
felipemateus/vis-oCompEstudo
|
threshHoldExemple2/threshHold.py
|
threshHold.py
|
py
| 541 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.adaptiveThreshold",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.ADAPTIVE_THRESH_GAUSSIAN_C",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.medianBlur",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 21,
"usage_type": "call"
}
] |
22912396559
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from flask import Flask, request, jsonify, make_response, abort
import requests
from stations import stations
app = Flask(__name__)
name = [
'station_train_code',
'from_station_name',
'to_station_name',
'lishi',
'start_time',
'arrive_time',
'swz_num',
'tz_num',
'zy_num',
'ze_num',
'gr_num',
'rw_num',
'yw_num',
'rz_num',
'yz_num',
'wz_num',
'qt_num']
@app.route('/zd')
def zd_tickets():
tickets = []
date = request.args.get('Date')
from_station = request.args.get('from')
to_station = request.args.get('to')
if from_station in stations.keys() and to_station in stations.keys():
from_station = stations[from_station]
to_station = stations[to_station]
else:
abort(400)
url = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT&queryDate={}&from_station={}&to_station={}'.\
format(date, from_station, to_station)
r = requests.get(url, verify=False)
# if the url param 'date' was supplied incorrectly, 12306 returns -1 (an int).
if r.json() == -1:
abort(400)
if 'datas' in r.json()['data']:
contents = r.json()['data']['datas']
# get out the information that we want in the contents dict.
for content in contents:
ticket = {key: content[key] for key in name}
tickets.append(ticket)
else:
abort(404)
return jsonify({'tickets': tickets})
@app.route('/hc')
def hc_tickets():
tickets_1 = []
tickets_2 = []
date = request.args.get('Date')
from_station = request.args.get('from')
to_station = request.args.get('to')
changed_station = request.args.get('change')
if from_station in stations.keys() and to_station in stations.keys() and changed_station in stations.keys():
from_station = stations[from_station]
to_station = stations[to_station]
changed_station = stations[changed_station]
else:
abort(400)
url1 = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT&queryDate={}&from_station={}&to_station={}'. \
format(date, from_station, changed_station)
r1 = requests.get(url1, verify=False)
if r1.json() == -1:
abort(400)
if 'datas' in r1.json()['data']:
contents_1 = r1.json()['data']['datas']
for content in contents_1:
ticket = {key: content[key] for key in name}
tickets_1.append(ticket)
else:
abort(404)
for ticket in tickets_1:
if int(ticket['lishi'][:2]) + int(ticket['start_time'][:2]) < 24: # TODO
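# first leg arrives the same day, so connecting trains are searched on the same queryDate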
url2 = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT' \
'&queryDate={}&from_station={}&to_station={}'. \
format(date, changed_station, to_station)
r2 = requests.get(url2, verify=False)
if r2.json() == -1:
abort(400)
if 'datas' in r2.json()['data']:
contents_2 = r2.json()['data']['datas']
for content in contents_2:
ticket = {key: content[key] for key in name}
tickets_2.append(ticket)
if tickets_2:
for x in tickets_1:
x['changed_ticket'] = [y for y in tickets_2 if
1 < int(y['start_time'][:2]) - int(x['arrive_time'][:2]) < 3]
else:
abort(404)
else:
# if the first train arrives on the next day, add one day to the queryDate param.
date2 = str(datetime.strptime(date, '%Y-%m-%d') + timedelta(days=1))[:10]
# TODO: the following repeatedly codes should be moved to a helper method.
url2 = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT' \
'&queryDate={}&from_station={}&to_station={}'. \
format(date2, changed_station, to_station)
r2 = requests.get(url2, verify=False)
if r2.json() == -1:
abort(400)
if 'datas' in r2.json()['data']:
contents_2 = r2.json()['data']['datas']
for content in contents_2:
ticket = {key: content[key] for key in name}
tickets_2.append(ticket)
if tickets_2:
for x in tickets_1:
x['changed_ticket'] = [y for y in tickets_2 if
1 < int(y['start_time'][:2]) - int(x['arrive_time'][:2]) < 3]
return jsonify({'tickets': tickets_1})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'Please check your query param format'}), 400)
if __name__ == '__main__':
app.run(debug=True)
|
shenmj053/querytickets
|
tickets.py
|
tickets.py
|
py
| 5,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "stations.stations.keys",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "stations.stations",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "stations.stations",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "stations.stations",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "stations.stations.keys",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "stations.stations",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "stations.stations",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "stations.stations",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "stations.stations",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 150,
"usage_type": "call"
}
] |
34107122191
|
from flask import g, request, current_app
import iot_api_core
import time
class InstanceVersionBaseBehavior():
def __init__(self, widget_type, namespace, instance_id):
self.widget_type = widget_type
self.namespace = namespace
self.instance_id = instance_id
self.lumavate = iot_api_core.Lumavate()
self.temp = {}
self.post_version_create_handlers = []
self.experience_info = None
self._instance = None
self.status_id = None
data = request.get_json(silent=True)
if data:
self.status_id = data.get('statusId')
if self.namespace is None:
self.namespace = self.rest_get_single(self.instance_id)['namespace']
@property
def properties(self):
return []
@property
def components(self):
return []
@property
def page_security_property(self):
page_security = {
'always': 'Always Render',
'user-logged-in': 'Only Render When User Logged In',
'user-not-logged-in': 'Only Render When User NOT Logged In',
'prod-registered': 'Only Render When Product Registered',
'device-authorized': 'Only Render When Device Authorized',
'prod-not-registered': 'Only Render When Product NOT Registered',
'device-not-authorized': 'Only Render When Device NOT Authorized',
}
return iot_api_core.DropdownProperty('General', 'General Settings', 'pageSecurity', 'Page Security', self, default='always', options=page_security)
@property
def experience_id(self):
self.load_experience_info()
return self.experience_info.get('id')
@property
def model_id(self):
self.load_experience_info()
return self.experience_info.get('modelId')
def load(self, version_name):
instance = self.rest_get_single(self.instance_id)
data = instance[version_name + 'Version']['data']
for x in self.properties:
if not x.name.startswith('instance__'):
x.read(data)
def get_general_properties(self, include_auth=False):
return [
self.instance_name_property(),
self.instance_page_type_property(include_auth=include_auth),
iot_api_core.ToggleProperty('General', 'General Settings', 'displayBackgroundImage', 'Display Background Image', self, default=False),
iot_api_core.ImageProperty('General', 'General Settings', 'backgroundImage', 'Background Image', self),
iot_api_core.ColorProperty('General', 'General Settings', 'backgroundColor', 'Background Color', self, default='#e2e2e2'),
self.page_security_property
]
def load_experience_info(self):
if not self.experience_info:
results = self.lumavate.get('/iot/v1/experiences?siteName=' + self.namespace)
if len(results) > 0:
self.experience_info = results[0]
self.model_info = self.lumavate.get('/iot/v1/models/' + str(self.experience_info['modelId']))
else:
self.experience_info = {}
self.model_info = {}
def rest_get_single(self, id):
if self._instance is None:
self._instance = self.lumavate.get('/iot/v1/widget-instances/' + str(id))
return self._instance
def get_version_id(self, instance_id, version_name):
instance = self.rest_get_single(instance_id)
return str(instance[version_name + 'VersionId'])
def get_property(self, name):
return next((p for p in self.properties if p.name == name), None)
def get_collection_rest_uri(self):
return '/iot/v1/widget-instances/' + str(self.instance_id) + '/versions'
def get_single_rest_uri(self, version_id):
return self.get_collection_rest_uri() + '/' + str(version_id)
def rest_get_collection(self):
return self.lumavate.get(self.get_collection_rest_uri())
def instance_name_property(self):
instance = self.rest_get_single(self.instance_id)
return iot_api_core.TextProperty('General', 'General Settings', 'instance__name', 'Page Name', self, rows=0, default=instance['name'])
def resolve_images(self, data):
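# Recursively swap {key, versionId} file references for resolved preview data from
# the files API, keeping any contentType/url/mobileUrl fields already present.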
if isinstance(data, dict):
if 'key' in data and 'versionId' in data:
image_data = self.lumavate.post('/iot/v1/files/preview/' + data['key'] + '/' + data['versionId'], {})
for f in ['contentType', 'url', 'mobileUrl']:
if f in data:
image_data[f] = data.get(f)
return image_data
else:
return { k: self.resolve_images(data[k]) for k in data }
elif isinstance(data, list):
return [self.resolve_images(x) for x in data]
else:
return data
def collapse_language(self, data):
lang = 'en-us'
if isinstance(data, dict):
if lang in data:
return data[lang]
else:
return { k: self.collapse_language(data[k]) for k in data }
elif isinstance(data, list):
return [self.collapse_language(x) for x in data]
else:
return data
def instance_page_type_property(self, include_auth=False):
page_types = {
'home': 'Home',
'registration': 'Registration',
'auth': 'Auth',
'error': 'Error',
'normal': '<Normal>'
}
if include_auth == False:
del page_types['auth']
return iot_api_core.DropdownProperty('General', 'General Settings', 'pageType', 'Page Type', self, default='normal', options=page_types)
def rest_create(self):
instance = self.rest_get_single(self.instance_id)
if instance:
payload = {'data': self.validate_data(request.get_json())}
results = self.lumavate.post(self.get_collection_rest_uri() + '-direct', payload)
instance['futureVersionId'] = results['id']
for vch in self.post_version_create_handlers:
vch()
self.post_version_create_handlers = []
if instance.get('futureVersion') is not None:
results['delta'] = self.get_delta_document(instance.get('futureVersion').get('data'), results['data'])
else:
results['delta'] = results.get('data')
return results
def background(self, function, args=[], kwargs={}):
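# Queue a method call as a background job; a worker re-enters via run_background below.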
data = {
'namespace': self.namespace,
'widgetInstanceId': self.instance_id,
'widgetType': current_app.config['WIDGET_ID'],
'method': function.__name__,
'args': args,
'kwargs': kwargs,
'statusId': self.status_id
}
self.lumavate.post('/iot/v1/background', data)
def run_background(self):
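# NOTE: the next two lines short-circuit this method, so the dispatch code
# after the return is currently unreachable (apparently a leftover debug stub).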
self.lumavate.put('/iot/v1/statuses/' + self.status_id, {'percent': 100})
return {'a': 4}
data = request.get_json()
method = data.get('method')
args = data.get('args')
kwargs = data.get('kwargs')
getattr(self, method)(*args, **kwargs)
def publish(self, version_name):
instance = self.rest_get_single(self.instance_id)
if not instance:
return
if version_name not in ['future', 'draft', 'production', 'current']:
return
version = instance.get(version_name + 'Version')
#for x in self.properties:
# x.read(version['data'])
# x.publish()
if self.status_id:
self.lumavate.put('/iot/v1/statuses/' + self.status_id, {'percent': 100})
else:
print('NOPE', flush=True)
return {'Status': 'Ok'}
def store_data(self, category, record_id, data, latitude=None, longitude=None, version='future'):
version_id = self.get_version_id(self.instance_id, version)
payload = {
'recordId': str(record_id),
'data': data,
'latitude': latitude,
'longitude': longitude
}
return self.lumavate.post(self.get_single_rest_uri(version_id) + '/data/' + category, payload)
def clear_data(self, category, version='future'):
version_id = self.get_version_id(self.instance_id, version)
return self.lumavate.delete(self.get_single_rest_uri(version_id) + '/data/' + category)
def get_current_version(self):
if hasattr(g, 'iot_context'):
return g.iot_context['token_data']['version']
else:
return 'future'
def get_data(self, category, default=[], qs=''):
version_id = self.get_version_id(self.instance_id, self.get_current_version())
res = self.lumavate.get('/iot/v1/widget-instances/' + str(self.instance_id) + '/versions/' + version_id + '/data/' + category + '?' + qs)
for x in res:
if 'distance' in x:
x['data']['distance'] = x['distance']
res = [x['data'] for x in res]
if len(res) == 0:
return default
else:
return res
def load_activation_info(self):
api_result = {}
try:
api_result = self.lumavate.get('/iot/v1/labels/' + str(g.iot_context['token_data'].get('activationId', 0)))
except Exception as e:
pass
return {
'key': api_result.get('key'),
'serialNumber': api_result.get('serialNumber')
}
def get_config_data(self):
# Check if there is a valid version for the current context
instance = self.rest_get_single(self.instance_id)
if instance[self.get_current_version() + 'Version'] is None:
raise Exception('Version ' + self.get_current_version() + ' does not exist for instance ' + str(self.instance_id))
result = instance[self.get_current_version() + 'Version']['data']
# Is there any activation to report?
result['activation'] = self.load_activation_info()
# Are there any 'special' pages that the UI should know about?
result['authCheck'] = None
if g.iot_context['token_data'].get('authUrl') is not None:
root, part, instance = g.iot_context['token_data']['authUrl'].rpartition('/')
result['authCheck'] = '{}/api/instances/{}/check-login-status'.format(root, instance)
return result
def default_image_data(self, data, prop):
return {
'preview': data.get(prop, {}).get('preview', '/icons/iot/page/api/instances/icons/no_image_available.png'),
'previewLarge': data.get(prop, {}).get('previewLarge', '/icons/iot/page/api/instances/icons/no_image_available.png'),
'previewMedium': data.get(prop, {}).get('previewMedium', '/icons/iot/page/api/instances/icons/no_image_available.png'),
'previewSmall': data.get(prop, {}).get('previewSmall', '/icons/iot/page/api/instances/icons/no_image_available.png')
}
def get_delta_document(self, original, current):
if original is None:
return current
else:
result = {}
for x in self.properties:
dd = x.delta_doc(original.get(x.name), current.get(x.name))
if dd is not None:
result[x.name] = dd
return result
def validate_data(self, data):
result = {}
instance_payload = {}
for x in self.properties:
if x.name.startswith('instance__'):
field = x.name.split('__')[-1]
new_val = x.read(data)
if self._instance[field] != new_val:
self._instance[field] = new_val
instance_payload[x.name.split('__')[-1]] = x.read(data)
else:
result[x.name] = x.read(data)
if len(instance_payload.keys()) > 0:
self.lumavate.put('/iot/v1/widget-instances/' + str(self.instance_id), instance_payload)
return result
def get_all_components(self):
return [
{
'label': x.instantiate().label,
'type': x.instantiate().component_type,
'icon': x.instantiate().icon_url,
'section': x.instantiate().section,
'category': x.instantiate().category
} for x in self.components
]
def get_component_properties(self, component_type):
comp = next((x.instantiate() for x in self.components if x.instantiate().component_type == component_type), None)
if comp:
return comp.get_properties()
def get_component_property(self, component_type, property_name):
comp = next((x.instantiate() for x in self.components if x.instantiate().component_type == component_type), None)
if comp:
return comp.get_property(property_name)
def get_widget_properties(self):
return [x.to_json() for x in self.properties]
def handle_language_fields(self, data):
lang = 'en-us'
if isinstance(data, dict):
if lang in data:
return self.handle_language_fields(data[lang])
else:
return { k: self.handle_language_fields(data[k]) for k in data }
if isinstance(data, list):
return [ self.handle_language_fields(x) for x in data ]
else:
return data
|
Lumavate-Team/python-hello
|
app/iot_api_core/instance_version_base.py
|
instance_version_base.py
|
py
| 12,129 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "iot_api_core.Lumavate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "iot_api_core.DropdownProperty",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "iot_api_core.ToggleProperty",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "iot_api_core.ImageProperty",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "iot_api_core.ColorProperty",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "iot_api_core.TextProperty",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "iot_api_core.DropdownProperty",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 229,
"usage_type": "argument"
},
{
"api_name": "flask.g.iot_context",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "flask.g.iot_context",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "flask.g.iot_context",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "flask.g.iot_context",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 276,
"usage_type": "name"
}
] |
26112891425
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "07/01/2019"
import os
import logging
import weakref
from . import qt
import silx.resources
from silx.utils import weakref as silxweakref
_logger = logging.getLogger(__name__)
"""Module logger"""
_cached_icons = None
"""Cache loaded icons in a weak structure"""
def getIconCache():
"""Get access to all cached icons
:rtype: dict
"""
global _cached_icons
if _cached_icons is None:
_cached_icons = weakref.WeakValueDictionary()
# Clean up the cache before leaving the application
# See https://github.com/silx-kit/silx/issues/1771
qt.QApplication.instance().aboutToQuit.connect(cleanIconCache)
return _cached_icons
def cleanIconCache():
"""Clean up the icon cache"""
_logger.debug("Clean up icon cache")
_cached_icons.clear()
_supported_formats = None
"""Order of file format extension to check"""
class AbstractAnimatedIcon(qt.QObject):
"""Store an animated icon.
It provides an event containing the new icon every time it is updated."""
def __init__(self, parent=None):
"""Constructor
:param qt.QObject parent: Parent of the QObject
:raises: ValueError when name is not known
"""
qt.QObject.__init__(self, parent)
self.__targets = silxweakref.WeakList()
self.__currentIcon = None
iconChanged = qt.Signal(qt.QIcon)
"""Signal sent with a QIcon everytime the animation changed."""
def register(self, obj):
"""Register an object to the AbstractAnimatedIcon.
If no objects are registered, the animation is paused.
Objects are stored in a weak list.
:param object obj: An object
"""
if obj not in self.__targets:
self.__targets.append(obj)
self._updateState()
def unregister(self, obj):
"""Remove the object from the registration.
If no objects are registered, the animation is paused.
:param object obj: A registered object
"""
if obj in self.__targets:
self.__targets.remove(obj)
self._updateState()
def hasRegistredObjects(self):
"""Returns true if any object is registred.
:rtype: bool
"""
return len(self.__targets)
def isRegistered(self, obj):
"""Returns true if the object is registred in the AbstractAnimatedIcon.
:param object obj: An object
:rtype: bool
"""
return obj in self.__targets
def currentIcon(self):
"""Returns the icon of the current frame.
:rtype: qt.QIcon
"""
return self.__currentIcon
def _updateState(self):
"""Update the object according to the connected objects."""
pass
def _setCurrentIcon(self, icon):
"""Store the current icon and emit a `iconChanged` event.
:param qt.QIcon icon: The current icon
"""
self.__currentIcon = icon
self.iconChanged.emit(self.__currentIcon)
class MovieAnimatedIcon(AbstractAnimatedIcon):
"""Store a looping QMovie to provide icons for each frames.
Provides an event with the new icon everytime the movie frame
is updated."""
def __init__(self, filename, parent=None):
"""Constructor
:param str filename: An icon name to an animated format
:param qt.QObject parent: Parent of the QObject
:raises: ValueError when name is not known
"""
AbstractAnimatedIcon.__init__(self, parent)
qfile = getQFile(filename)
self.__movie = qt.QMovie(qfile.fileName(), qt.QByteArray(), parent)
self.__movie.setCacheMode(qt.QMovie.CacheAll)
self.__movie.frameChanged.connect(self.__frameChanged)
self.__cacheIcons = {}
self.__movie.jumpToFrame(0)
self.__updateIconAtFrame(0)
def __frameChanged(self, frameId):
"""Callback everytime the QMovie frame change
:param int frameId: Current frame id
"""
self.__updateIconAtFrame(frameId)
def __updateIconAtFrame(self, frameId):
"""
Update the current stored QIcon
:param int frameId: Current frame id
"""
if frameId in self.__cacheIcons:
icon = self.__cacheIcons[frameId]
else:
icon = qt.QIcon(self.__movie.currentPixmap())
self.__cacheIcons[frameId] = icon
self._setCurrentIcon(icon)
def _updateState(self):
"""Update the movie play according to internal stat of the
MovieAnimatedIcon."""
self.__movie.setPaused(not self.hasRegistredObjects())
class MultiImageAnimatedIcon(AbstractAnimatedIcon):
"""Store a looping QMovie to provide icons for each frames.
Provides an event with the new icon everytime the movie frame
is updated."""
def __init__(self, filename, parent=None):
"""Constructor
:param str filename: An icon name to an animated format
:param qt.QObject parent: Parent of the QObject
:raises: ValueError when name is not known
"""
AbstractAnimatedIcon.__init__(self, parent)
self.__frames = []
for i in range(100):
try:
frame_filename = os.sep.join((filename, ("%02d" %i)))
frame_file = getQFile(frame_filename)
except ValueError:
break
try:
icon = qt.QIcon(frame_file.fileName())
except ValueError:
break
self.__frames.append(icon)
if len(self.__frames) == 0:
raise ValueError("Animated icon '%s' do not exists" % filename)
self.__frameId = -1
self.__timer = qt.QTimer(self)
self.__timer.timeout.connect(self.__increaseFrame)
self.__updateIconAtFrame(0)
def __increaseFrame(self):
"""Callback called every timer timeout to change the current frame of
the animation
"""
frameId = (self.__frameId + 1) % len(self.__frames)
self.__updateIconAtFrame(frameId)
def __updateIconAtFrame(self, frameId):
"""
Update the current stored QIcon
:param int frameId: Current frame id
"""
self.__frameId = frameId
icon = self.__frames[frameId]
self._setCurrentIcon(icon)
def _updateState(self):
"""Update the object to wake up or sleep it according to its use."""
if self.hasRegistredObjects():
if not self.__timer.isActive():
self.__timer.start(100)
else:
if self.__timer.isActive():
self.__timer.stop()
def getWaitIcon():
"""Returns a cached version of the waiting AbstractAnimatedIcon.
:rtype: AbstractAnimatedIcon
"""
return getAnimatedIcon("process-working")
def getAnimatedIcon(name):
"""Create an AbstractAnimatedIcon from a resource name.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
If no prefix are specified, the file with be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
Try to load a mng or a gif file, then try to load a multi-image animated
icon.
In Qt5, mng and gif are not used because their transparency is not
handled well.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding AbstractAnimatedIcon
:raises: ValueError when name is not known
"""
key = name + "__anim"
cached_icons = getIconCache()
if key not in cached_icons:
qtMajorVersion = int(qt.qVersion().split(".")[0])
icon = None
# ignore mng and gif in Qt5
if qtMajorVersion != 5:
try:
icon = MovieAnimatedIcon(name)
except ValueError:
icon = None
if icon is None:
try:
icon = MultiImageAnimatedIcon(name)
except ValueError:
icon = None
if icon is None:
raise ValueError("Not an animated icon name: %s", name)
cached_icons[key] = icon
else:
icon = cached_icons[key]
return icon
def getQIcon(name):
"""Create a QIcon from its name.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
If no prefix are specified, the file with be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding QIcon
:raises: ValueError when name is not known
"""
cached_icons = getIconCache()
if name not in cached_icons:
qfile = getQFile(name)
icon = qt.QIcon(qfile.fileName())
cached_icons[name] = icon
else:
icon = cached_icons[name]
return icon
def getQPixmap(name):
"""Create a QPixmap from its name.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
If no prefix are specified, the file with be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding QPixmap
:raises: ValueError when name is not known
"""
qfile = getQFile(name)
return qt.QPixmap(qfile.fileName())
def getQFile(name):
"""Create a QFile from an icon name. Filename is found
according to supported Qt formats.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
If no prefix are specified, the file with be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding QFile
:rtype: qt.QFile
:raises: ValueError when name is not known
"""
global _supported_formats
if _supported_formats is None:
_supported_formats = []
supported_formats = qt.supportedImageFormats()
order = ["mng", "gif", "svg", "png", "jpg"]
for format_ in order:
if format_ in supported_formats:
_supported_formats.append(format_)
if len(_supported_formats) == 0:
_logger.error("No format supported for icons")
else:
_logger.debug("Format %s supported", ", ".join(_supported_formats))
for format_ in _supported_formats:
format_ = str(format_)
filename = silx.resources._resource_filename('%s.%s' % (name, format_),
default_directory='gui/icons')
qfile = qt.QFile(filename)
if qfile.exists():
return qfile
_logger.debug("File '%s' not found.", filename)
raise ValueError('Not an icon name: %s' % name)
|
silx-kit/silx
|
src/silx/gui/icons.py
|
icons.py
|
py
| 11,642 |
python
|
en
|
code
| 106 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "weakref.WeakValueDictionary",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "silx.utils.weakref.WeakList",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "silx.utils.weakref",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "os.sep.join",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "silx.resources.resources._resource_filename",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "silx.resources.resources",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "silx.resources",
"line_number": 373,
"usage_type": "name"
}
] |
12571576568
|
import requests
import pandas
from bs4 import BeautifulSoup
url = 'https://www.imdb.com/chart/top/'
response = requests.get(url).content
soup = BeautifulSoup(response,'html.parser')
title = soup.find_all('td', class_='titleColumn')
rating = soup.find_all('strong')
images = soup.find_all('img')
movie_name = []
movie_year =[]
movie_href =[]
movie_image = []
movie_rating = []
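# each titleColumn cell links to /title/<id>/...; rebuild a canonical title URL from that id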
for t in title:
imdb_title_num = t.a.get('href').split('/')[2]
href = 'https://www.imdb.com/title/'+imdb_title_num
movie_href.append(href)
imdb_title = t.a.text
movie_name.append(imdb_title)
year = t.span.text
movie_year.append(year)
for rate in rating:
r = rate.text
movie_rating.append(r)
for img in images:
i = img.get('src')
movie_image.append(i)
model = pandas.DataFrame({'title': movie_name, 'year': movie_year, 'rating': movie_rating, 'image': movie_image, 'href': movie_href})
model.to_json('movies_data.json', orient="records")
|
gpuligundla/IMDB-Top-Movies-List
|
imdb_scrap.py
|
imdb_scrap.py
|
py
| 979 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 41,
"usage_type": "call"
}
] |
39697194939
|
# -*- coding: utf-8 -*-
import scrapy
from LaGou.items import LagouItem
import LaGou.settings as settings
class LagouSpider(scrapy.Spider):
name = 'lagou'
allowed_domains = ['www.lagou.com']
start_urls = ['https://www.lagou.com/']
def parse(self, response):
if response.status==200:
items=response.css('ul .con_list_item')
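# each .con_list_item card on the listing page is one job posting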
data=LagouItem()
for item in items:
data['position']=item.css('.p_top .position_link h3::text').extract_first()
data['company']=item.css('.company_name a::text').extract_first()
data['sadd']=item.css('.p_top .add em::text').extract_first()
data['salary']=item.css('.li_b_l .money::text').extract_first()
data['claim']=item.css('.p_bot .li_b_l::text').extract()[-1].strip()
# data['tags']=item.css('.list_item_bot .li_b_l span::text').extract()
data['joburl']=item.css('.p_top .position_link::attr(href)').extract_first()
yield data
def start_requests(self):
for page in range(3,settings.MAX_PAGE+1):
url='https://www.lagou.com/jobs/list_%s?city=%s&cl=false&fromSearch=true'%(settings.KEY,settings.CITY) # enter uid
yield scrapy.Request(url=url,callback=self.parse,meta={'page':page},dont_filter=True)
|
siqyka/Reptile
|
works/LaGou/LaGou/spiders/lagou.py
|
lagou.py
|
py
| 1,369 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "LaGou.items.LagouItem",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "LaGou.settings.MAX_PAGE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "LaGou.settings",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "LaGou.settings.KEY",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "LaGou.settings",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "LaGou.settings.CITY",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 31,
"usage_type": "call"
}
] |
19482945267
|
import math
import random
import numba as nb
import numpy as np
@nb.jit(nopython=True)
def dimension_selector_uniform(n_dimensions):
return random.randrange(n_dimensions)
def get_dimension_selector_expovariate(
lambd=None,
rel_lambd=None,
):
if lambd is not None and rel_lambd is not None:
raise ValueError("Cannot set both lambd and rel_lambd")
if lambd is None and rel_lambd is None:
# the default, using a rel_lambd of 4.0, placing the pseudo-mean
# 1/4 of the way into the dimensions
rel_lambd = 4.
@nb.jit(nopython=True)
def dimension_selector_expovariate(n_dimensions):
nonlocal lambd, rel_lambd
value = math.inf
while value >= n_dimensions:
value = random.expovariate(
lambd if lambd is not None else rel_lambd/n_dimensions
)
return int(value)
return dimension_selector_expovariate
def get_finder_for_cluster_obeying(
check_func,
min_count=1,
max_count=-1,
max_depth=-1,
dimension_selector=dimension_selector_uniform,
fixed_dimensional_parameters=-1,
fixed_non_dimensional_parameters=-1,
fixed_n=-1,
verbose=False,
jit_kwargs={},
):
@nb.jit(nopython=True, **jit_kwargs)
def _find_cluster_obeying(
dimensional_parameters,
non_dimensional_parameters,
random_seed=None,
iterations=-1,
):
if dimensional_parameters.shape[1] != non_dimensional_parameters.shape[0]:
raise ValueError(
"Minor dimension of dimensional_parameters must match "
"major dimension of non_dimensional_parameters"
)
if (
fixed_dimensional_parameters != -1
and fixed_dimensional_parameters != dimensional_parameters.shape[0]
):
raise ValueError("Number of dimensional parameters not expected value")
if (
fixed_non_dimensional_parameters != -1
and fixed_non_dimensional_parameters != non_dimensional_parameters.shape[1]
):
raise ValueError("Number of non-dimensional parameters not expected value")
if (
fixed_n != -1
and fixed_n != non_dimensional_parameters.shape[0]
):
raise ValueError("Number of candidates not expected value")
final_max_depth = max_depth if max_depth != -1 else (1 + int(
math.floor(math.log(dimensional_parameters.shape[-1]) / math.log(2))
))
if final_max_depth < 2:
raise ValueError("max_depth < 2 makes no sense")
if random_seed is not None:
random.seed(random_seed)
bitmap_stack = np.zeros(
(final_max_depth, dimensional_parameters.shape[-1]),
dtype=np.bool_,
)
bitmap_stack[0,:] = True
right_branch_stack = np.zeros((final_max_depth,), dtype=np.int8)
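# right_branch_stack[level]: 0 = explore the >= split-point branch,
# 1 = its complement, anything else = unwind to the previous level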
current_level = 1
iteration = 0
while True:
if right_branch_stack[current_level] == 0:
chosen_dimension = dimension_selector(dimensional_parameters.shape[0])
vals_count = 0
# initialize these to any value of the correct type
vals_min = vals_max = dimensional_parameters[0,0]
# scan for range of remaining values in this dimension
for i in range(dimensional_parameters.shape[1]):
if bitmap_stack[current_level-1,i]:
v = dimensional_parameters[chosen_dimension,i]
if vals_count == 0:
vals_min = vals_max = v
else:
vals_min = min(vals_min, v)
vals_max = max(vals_max, v)
vals_count+=1
chosen_split_point = random.uniform(vals_min, vals_max)
# mark values greater than threshold
remaining_count = 0
for i in range(dimensional_parameters.shape[1]):
if bitmap_stack[current_level-1,i]:
is_chosen = (
dimensional_parameters[chosen_dimension,i] >= chosen_split_point
)
bitmap_stack[current_level,i] = is_chosen
if is_chosen:
remaining_count+=1
elif right_branch_stack[current_level] == 1:
# invert current_level's bitmap, masked by the previous level's
remaining_count = 0
for i in range(bitmap_stack.shape[1]):
if bitmap_stack[current_level-1,i]:
is_chosen = not bitmap_stack[current_level,i]
bitmap_stack[current_level,i] = is_chosen
if is_chosen:
remaining_count+=1
else:
# tidy up then unwind
right_branch_stack[current_level] = 0
bitmap_stack[current_level,:] = False
if current_level > 1:
current_level-=1
# advance branch at underlying level
right_branch_stack[current_level]+=1
else:
# we're at the root
iteration+=1
if iterations != -1 and iteration >= iterations:
return None
# start again by continuing at the
# same current_level
continue
if verbose:
print("current_level = ", current_level, " remaining_count = ", remaining_count)
if remaining_count < min_count:
right_branch_stack[current_level]+=1
continue
if max_count == -1 or remaining_count <= max_count:
ndp_subset = np.empty(
(remaining_count, non_dimensional_parameters.shape[1],),
dtype=non_dimensional_parameters.dtype,
)
j = 0
for i in range(bitmap_stack.shape[1]):
if j < remaining_count and bitmap_stack[current_level,i]:
for k in range(ndp_subset.shape[1]):
ndp_subset[j,k] = non_dimensional_parameters[i,k]
j+=1
check_result = check_func(ndp_subset)
if check_result:
if check_result > 0:
return bitmap_stack[current_level,:]
else:
# negative result signals to stop checking this branch
right_branch_stack[current_level]+=1
continue
if remaining_count <= 1:
# dividing any more makes no sense
right_branch_stack[current_level]+=1
continue
if current_level+1 >= final_max_depth:
# can't descend any deeper
right_branch_stack[current_level]+=1
continue
current_level+=1
return _find_cluster_obeying
|
risicle/cluscheck
|
cluscheck/__init__.py
|
__init__.py
|
py
| 7,246 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randrange",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.inf",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "random.expovariate",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.bool_",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "random.uniform",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 51,
"usage_type": "call"
}
] |
37482811265
|
# coding: utf-8
import json
import os
import click
import gql
import graphql
import requests
from gql.transport.requests import RequestsHTTPTransport
try:
# python2
from urlparse import urlparse
except ImportError:
# python3
from urllib.parse import urlparse
class SchemaSourceType(click.ParamType):
name = 'schema_source'
def __init__(self, authenvvar=None, **kwargs):
self.authenvvar = authenvvar
super().__init__(**kwargs)
def convert_from_url(self, value, param, ctx):
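# Build a gql client against the live endpoint and let it introspect the schema.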
headers = {}
if self.authenvvar is not None:
headers['Authorization'] = os.environ.get(self.authenvvar)
try:
client = gql.Client(
transport=RequestsHTTPTransport(
url=value, headers=headers, use_json=True,
),
fetch_schema_from_transport=True,
)
except requests.exceptions.HTTPError as e:
m = str(e)
if self.authenvvar is not None and e.response.status_code == 401:
m += ' : Try setting %s in the environment.' % self.authenvvar
self.fail(m, param=param, ctx=ctx)
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.RequestException
) as e:
self.fail(e, param=param, ctx=ctx)
return client.schema
def convert_from_file(self, value, param, ctx):
f = click.File('r').convert(value, param, ctx)
try:
introspection = json.load(f)['data']
schema = graphql.build_client_schema(introspection)
except (ValueError, KeyError) as e:
self.fail(
'File content is not a valid graphql schema: %s.' % e,
param=param, ctx=ctx
)
return schema
def convert(self, value, param, ctx):
parsedurl = urlparse(value)
if parsedurl.scheme and parsedurl.netloc:
schema = self.convert_from_url(value, param, ctx)
else:
schema = self.convert_from_file(value, param, ctx)
return schema
SCHEMA_SOURCE = SchemaSourceType()
|
wapiflapi/gqldiff
|
gqldiff/clickgql.py
|
clickgql.py
|
py
| 2,223 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "click.ParamType",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "gql.Client",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "gql.transport.requests.RequestsHTTPTransport",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "click.File",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "graphql.build_client_schema",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 72,
"usage_type": "call"
}
] |
72784612027
|
# https://www.codewars.com/kata/558d5c71c68d1e86b000010f
from itertools import product as P
from collections import Counter
# precompute
vampires = []
for L in (2,3):
G = [range(0,10) for _ in range(L)]
limit1 = 10**(2*L-1)
    limit2 = 10**(2*L) - 1  # products of two L-digit numbers have 2L digits
for a,b in P(P(*G), P(*G)):
p = int(''.join(map(str,a))) * int(''.join(map(str,b)))
if limit1 < p < limit2 and Counter(str(p)) == Counter(map(str,a+b)) and a[-1]+b[-1] != 0:
vampires.append(p)
vampires = sorted(list(set(vampires)))
def vampire_number(k):
return vampires[k-1]
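# Sanity check against known values: the smallest vampire numbers are
# 1260 = 21*60, 1395 = 15*93, 1435 = 35*41, so vampire_number(1) == 1260.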
# clever
is_vampire = lambda x, y: sorted(f"{x}{y}") == sorted(f"{x*y}") and x%10 + y%10 > 0
vampires = sorted({x*y for p in (1, 2) for x in range(10**p, 10**(p+1)) for y in range(x, 10**(p+1)) if is_vampire(x, y)})
# ya clever
from itertools import combinations
vampires = set()
for i in [1, 2]:
for x, y in combinations(range(10**i, 10**(i+1)), 2):
if x % 10 == 0 == y % 10:
continue
z = x * y
if sorted(str(z)) == sorted(f'{x}{y}'):
vampires.add(z)
xs = sorted(vampires)
|
blzzua/codewars
|
7-kyu/vampire_numbers_less_than_1000000.py
|
vampire_numbers_less_than_1000000.py
|
py
| 1,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.product",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 14,
"usage_type": "call"
}
] |
36310398862
|
import pygame
import colors
import config
class Field(pygame.sprite.Sprite):
def __init__(self, x, y, color=None, img_path=None):
super().__init__()
self.x = x
self.y = y
self.color = color
# sprite image
if img_path:
img = pygame.image.load(img_path).convert_alpha()
img = pygame.transform.scale(img, [config.FIELD_WIDTH, config.FIELD_HEIGHT])
self.base_img = img
self.image = img
self.rect = self.image.get_rect()
self.rect.x = x * config.FIELD_WIDTH
self.rect.y = y * config.FIELD_HEIGHT
else:
self.rect = pygame.Rect(x * config.FIELD_WIDTH, y * config.FIELD_HEIGHT, config.FIELD_WIDTH, config.FIELD_HEIGHT)
self.image = None
def draw(self, screen):
# draw field (background)
if self.image:
screen.blit(self.image, (self.rect.x, self.rect.y))
# # draw field border
# pygame.draw.rect(screen, colors.WHITE, self.rect, 1)
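# Illustrative usage sketch (not part of the original file; the asset path
# and window size are assumptions):
#
# pygame.init()
# screen = pygame.display.set_mode((640, 480))
# field = Field(2, 3, img_path='sprites/grass.png')
# field.draw(screen)
# pygame.display.flip()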
|
tobnie/human_planning_horizon
|
game/world/field.py
|
field.py
|
py
| 1,046 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.sprite",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.FIELD_WIDTH",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.FIELD_HEIGHT",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.FIELD_WIDTH",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "config.FIELD_HEIGHT",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "config.FIELD_WIDTH",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "config.FIELD_HEIGHT",
"line_number": 25,
"usage_type": "attribute"
}
] |
22610452246
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Hybrid, Specialization, ContactPerson, Subject
class MyUserAdmin(UserAdmin):
model = Hybrid
fieldsets = UserAdmin.fieldsets + (
(None, {'fields': (
'middle_name',
'member',
'graduation_year',
'image',
'gender',
'specialization',
'date_of_birth',
'title',
'card_key'
)}),
)
class ContactPersonAdmin(admin.ModelAdmin):
model = ContactPerson
list_display = ('title', 'search_name')
admin.site.register(Hybrid, MyUserAdmin)
admin.site.register(Specialization)
admin.site.register(ContactPerson, ContactPersonAdmin)
admin.site.register(Subject)
|
hybrida/hybridjango
|
apps/registration/admin.py
|
admin.py
|
py
| 792 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.admin.UserAdmin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.Hybrid",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.admin.UserAdmin.fieldsets",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.admin.UserAdmin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "models.ContactPerson",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Hybrid",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.Specialization",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.ContactPerson",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.Subject",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 32,
"usage_type": "name"
}
] |
40610907305
|
# Import libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import missingno as msno
from _datetime import date
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler, RobustScaler
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('display.width', 500)
# Load the dataset used for the examples below
def load():
data = pd.read_csv('01_miuul_machine_learning_summercamp/00_datasets/titanic.csv')
data.columns = [col.lower() for col in data.columns]
return data
df = load()
df.head()
##############################
# Standard scaler
##############################
ss = StandardScaler()
df['age_standard_scaler'] = ss.fit_transform(df[['age']])
df.head()
##############################
# Robust scaler
##############################
rs = RobustScaler()
df['age_robust_scaler'] = rs.fit_transform(df[['age']])
df.head()
##############################
# MinMax scaler
##############################
mms = MinMaxScaler()
df['age_min_max_scaler'] = mms.fit_transform(df[['age']])
df.head()
df.describe().T
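# For reference, the three transforms applied above are:
#   StandardScaler: z = (x - mean) / std
#   RobustScaler:   z = (x - median) / IQR, with IQR = q75 - q25
#   MinMaxScaler:   z = (x - min) / (max - min)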
##############################
# Getting num_summary function
##############################
def num_summary(dataframe, col_name, plot=False):
"""
for col in num_cols:
print(f'\n***************-{col.upper()}-***************')
num_summary(df, col, plot=False)
"""
quantiles = [0.05, 0.25, 0.50, 0.75, 0.90, 0.95, 0.99]
print(dataframe[col_name].describe(quantiles).T)
if plot:
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
sns.boxplot(y=dataframe[col_name], data=dataframe)
plt.subplot(1, 2, 2)
sns.histplot(x=dataframe[col_name], data=dataframe)
plt.show(block=True)
age_cols = [col for col in df.columns if 'age' in col]
for col in age_cols:
print(f'\n***************-{col.upper()}-***************')
num_summary(df, col, plot=True)
##############################
# Converting numerical variables to categorical variables
##############################
df['age_qcut'] = pd.qcut(df.age, 5)
df.head()
df['age_cut'] = pd.cut(df.age, bins=[0, 18, 25, 45, 60, 100], labels=['0_18', '19_25', '26_45', '46_60', '61_100'])
df.head()
|
afatsumcemreg/feature_engineering
|
05_feature_scaling.py
|
05_feature_scaling.py
|
py
| 2,572 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pandas.set_option",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.RobustScaler",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "seaborn.boxplot",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "seaborn.histplot",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pandas.qcut",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 87,
"usage_type": "call"
}
] |
1370868487
|
"""Quizzes user on terms and definitions."""
import csv
import random
from collections import namedtuple
RTN = lambda: '\n'
def open_csv_populate_dct():
"""Import a csv and populate a dictionary with its contents."""
dct = {}
with open('csvs/terms_and_definitions.csv') as f:
F_CSV = csv.reader(f)
ROW = namedtuple('Row', next(F_CSV))
for r in F_CSV:
row = ROW(*r)
dct[row.term] = row.definition
return dct
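# Expected CSV layout (illustrative rows; the first row supplies the
# namedtuple field names used above):
#   term,definition
#   GIL,global interpreter lock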
def quiz_user():
"""Quiz user."""
lst = []
print(RTN())
for term, definition in sorted(TERMS_AND_DEFINITIONS.items(),
key=lambda x: random.random()):
print(term)
user_answer = input('> ')
random.choice(list(TERMS_AND_DEFINITIONS))
if user_answer == definition:
print('correct')
lst.append((term, 'correct'))
print(RTN())
else:
print('work on that one')
print(f'The correct answer is: {definition}')
lst.append((term, 'incorrect'))
print(RTN())
return lst
def count_results():
"""Count correct answers."""
lst = []
for i in results:
if i[1] == 'correct':
lst.append('correct')
else:
pass
return lst
def calc_perc(correct_answers, total):
"""Calculate percentage of correct answers."""
perc = len(correct_answers) / total * 100
perc_correct = '{0:.2f}%'.format(perc)
print(f'percent correct: {perc_correct}\n')
TERMS_AND_DEFINITIONS = open_csv_populate_dct()
TERMS_TOTAL = len(TERMS_AND_DEFINITIONS)
results = quiz_user()
corrects = count_results()
calc_perc(corrects, TERMS_TOTAL)
|
craighillelson/terms_and_definitions
|
terms_and_definitions.py
|
terms_and_definitions.py
|
py
| 1,717 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 31,
"usage_type": "call"
}
] |
10981447634
|
'''
Created on Oct 26, 2015
@author: jcheung
Developed for Python 2. May work for Python 3 too (but I never tried) with minor changes.
'''
import xml.etree.cElementTree as ET
import codecs
class WSDInstance:
def __init__(self, my_id, lemma, context, index):
self.id = my_id # id of the WSD instance
self.lemma = lemma # lemma of the word whose sense is to be resolved
self.context = context # lemma of all the words in the sentential context
self.index = index # index of lemma within the context
def __str__(self):
'''
For printing purposes.
'''
return '%s\t%s\t%s\t%d' % (self.id, self.lemma, ' '.join(self.context), self.index)
def load_instances(f):
'''
Load two lists of cases to perform WSD on. The structure that is returned is a dict, where
the keys are the ids, and the values are instances of WSDInstance.
'''
tree = ET.parse(f)
root = tree.getroot()
dev_instances = {}
test_instances = {}
for text in root:
if text.attrib['id'].startswith('d001'):
instances = dev_instances
else:
instances = test_instances
for sentence in text:
# construct sentence context
context = [to_ascii(el.attrib['lemma']) for el in sentence]
for i, el in enumerate(sentence):
if el.tag == 'instance':
my_id = el.attrib['id']
lemma = to_ascii(el.attrib['lemma'])
instances[my_id] = WSDInstance(my_id, lemma, context, i)
return dev_instances, test_instances
def load_key(f):
'''
Load the solutions as dicts.
Key is the id
Value is the list of correct sense keys.
'''
dev_key = {}
test_key = {}
for line in open(f):
if len(line) <= 1: continue
#print (line)
doc, my_id, sense_key = line.strip().split(' ', 2)
if doc == 'd001':
dev_key[my_id] = sense_key.split()
else:
test_key[my_id] = sense_key.split()
return dev_key, test_key
def to_ascii(s):
# remove all non-ascii characters
return codecs.encode(s, 'ascii', 'ignore')
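# Shape of the key file parsed by load_key (illustrative line, following the
# split(' ', 2) above): "<doc-id> <instance-id> <sense-key ...>", e.g.
#   d001 d001.s001.t002 art%1:06:00::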
|
JGuymont/lesk-algorithm
|
lesk/loader.py
|
loader.py
|
py
| 2,227 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "xml.etree.cElementTree.parse",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "codecs.encode",
"line_number": 70,
"usage_type": "call"
}
] |
972412310
|
###Episode 1
import numpy as np
import torch
#Training data
# Input (temp, rainfall, humidity)
inputs = np.array([[73,67,43], [91,88,64], [87, 134, 58], [108, 43, 37], [69, 96,70]], dtype='float32')
# targets (apples, oranges)
targets = np.array([[56, 70], [81,101], [119, 133], [22,37], [103, 119]], dtype = 'float32')
#Convert inputs and targets to tensors
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print (inputs)
print(targets)
###Linear Regression model
#Weights and Biases
w = torch.randn(2, 3, requires_grad = True)
b = torch.randn(2, requires_grad = True)
print(w)
print(b)
###Model
def model(x):
    return x @ w.t() + b
# Generate predictions
preds = model(inputs)
print(preds)
#Compare with actual targets
print(targets)
### Loss function: to check how well our model is performing
# Calculate the difference between the two matrices (preds and targets).
# Square all the elements of the difference matrix to remove negative values.
# Calculate the average of the elements in the resulting matrix.
#diff = preds - targets
#print(diff)
#diff_sqr = diff * diff
#torch.sum(diff_sqr)/diff.numel()
#Mean Squared Error (MSE) loss
def mse(t1, t2):
diff = t1 - t2
return torch.sum(diff * diff) / diff.numel()
# Compute Loss
loss = mse(preds, targets)
print(loss)
# Compute Gradients
loss.backward()
# Gradients for weights
print(w)
print(w.grad)
# Gradients for biases
print(b)
print(b.grad)
# Reset the gradient values, NOT the actual parameter values
w.grad.zero_()
b.grad.zero_()
print(w)
print(b)
### Adjust weights and biases using gradient descent
# Generate predictions
# Calculate the loss
# Compute gradients w.r.t weights and biases
# Adjust the weights by subtracting a small quantity proportional to the gradient
# Reset gradients to zero
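# In symbols, each update below is: w <- w - lr * dL/dw and b <- b - lr * dL/db,
# with the learning rate lr = 1e-5.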
# Generate predictions
preds = model(inputs)
print(preds)
# Calculate the loss
loss = mse(preds, targets)
print(loss)
# Compute Gradients
loss.backward()
print(w.grad)
print(b.grad)
# Adjust the weights and reset gradients
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
print(w)
print(b)
#Let's check whether the slight change in the weights above has improved (i.e. decreased) the loss.
#Calculate loss again
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
# Train for multiple epochs (let say for 500 epochs)
for i in range(500):
    preds = model(inputs)
loss = mse(preds, targets)
loss.backward() # To calculate the gradients
with torch.no_grad():
w -= w.grad * 1e-5 # 1e-5 is Learning rate which is a hyper-parameter in machine learning
b -= b.grad * 1e-5
w.grad.zero_() # Reset the gradients to zero
b.grad.zero_()
### Let's calculate the loss again
# Calculate loss
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
### Let's compare predictions and targets (actual values)
#Predictions
print(preds)
#Targets (Actual values)
print(targets)
##### Working with Jovian
##Install
#pip install jovian --upgrade -q
#import jovian
#jovian.commit()
################# Linear regression using PyTorch built-ins, without writing the functions manually
import numpy as np
import torch
import torch.nn as nn
# Input (temp, rainfall, humidity)
inputs = np.array([[73,67,43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91,88,64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64],[87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype = 'float32')
#targets (apples, oranges)
targets =np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119], [56, 70], [81, 101], [119, 133], [22, 37], [103, 119], [56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype = 'float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
###Dataset and dataloader
# We won't feed the complete dataset at once; working in batches avoids memory issues and keeps computations cheaper
from torch.utils.data import TensorDataset
# Define Dataset using TensorDataset
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]
# You can also pick specific rows of data
#train_ds[[1, 3, 5, 7]]
from torch.utils.data import DataLoader
#Define DataLoader
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
for xb, yb in train_dl:
#print('batch:') # prints all the batches
print(xb)
print(yb)
    break  # keep the break to inspect a single batch; remove it (and re-enable the 'batch:' print above) to iterate over all batches
# Define model using nn.Linear
model = nn.Linear(3, 2)
print(model.weight)
print(model.bias)
# Parameters
list(model.parameters())
#Generate predictions
preds = model(inputs)
print(preds)
###Loss Functions
# Import nn.functional
import torch.nn.functional as F
# Define the loss function
loss_fn = F.mse_loss
loss = loss_fn(model(inputs),targets)
print(loss)
# Note: to read help on PyTorch's Linear module and mse_loss, use the following lines
# ?nn.Linear
# ?F.mse_loss
###Optimizer
# Define optimizer (Stochastic Gradient Descent)
opt = torch.optim.SGD(model.parameters(), lr=1e-5)
### Train the model
# Generate predictions
# Calculate the loss
# Compute gradients w.r.t weights and biases
# Adjust the weights by subtracting a small quantity proportional to the gradient
# Reset gradients to zero
#Utility function to train the model
def fit(num_epochs, model, loss_fn, opt):
#Repeat for given number of epochs
for epoch in range(num_epochs):
#Train with batches of data
for xb, yb in train_dl:
#1. Generate predictions
pred = model(xb)
#2. Calculate loss
loss = loss_fn(pred, yb)
#3. Compute gradients
loss.backward()
#4. Update parameters using gradients
opt.step()
#5. reset the gradients to zero
opt.zero_grad()
#Print the progress
if (epoch+1) % 10 == 0:
print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
            # loss.item() gives the actual value of the loss in that batch (printed every 10th epoch here)
# Train the model for 100 epochs
fit(100, model, loss_fn, opt)
# Generate predictions
preds = model(inputs)
print(preds)
# Compare with the targets
print(targets)
### Commit and update the notebook
import jovian
jovian.commit()
|
NancyGirdhar/PyTorch_Basics
|
PyTorchSeries_E1.py
|
PyTorchSeries_E1.py
|
py
| 6,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.TensorDataset",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "jovian.commit",
"line_number": 263,
"usage_type": "call"
}
] |
1218087321
|
import os
from collections import namedtuple
#Define a named tuple to represent our files
FileStruct = namedtuple("File", "file_name file_ext file_path dir")
def XMLifyFile(file_struct):
return '\t\t<file alias="' + file_struct.file_name + '">' + file_struct.file_path + "</file>\n"
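# For example, FileStruct(file_name='icon', file_ext='.png',
# file_path='Resources/Icons/icon.png', dir='Resources/Icons') becomes:
#   <file alias="icon">Resources/Icons/icon.png</file>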
valid_exts = [".graphml", ".png", ".gif", ".jpg", ".ico"]
directories = {}
for root, dirs, files in os.walk("."):
for file_name in files:
# Trim off the ./
dir_name = root[2:]
alias_name = dir_name
dir_name = "Resources/" + dir_name
# Append Resource Path
file_prefix = os.path.splitext(file_name)[0]
file_ext = os.path.splitext(file_name)[1].lower()
file_path = os.path.join(dir_name, file_name)
if file_ext in valid_exts:
s = FileStruct(file_name = file_prefix, file_ext = file_ext, file_path = file_path, dir = dir_name)
if alias_name not in directories:
directories[alias_name] = []
directories[alias_name].append(s)
resource_file = open("../resources.qrc", 'w')
resource_file.write("<RCC>\n")
for dir_name in directories:
open_qresource = '\t<qresource prefix="/'+ dir_name + '">\n'
close_qresource = '\t</qresource>\n'
resource_file.write(open_qresource)
for file_struct in directories[dir_name]:
resource_file.write(XMLifyFile(file_struct))
resource_file.write(close_qresource)
resource_file.write("</RCC>\n")
resource_file.flush()
resource_file.close()
|
cdit-ma/SEM
|
medea/src/app/Resources/resourceqrcmaker.py
|
resourceqrcmaker.py
|
py
| 1,521 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
}
] |
83273089
|
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
def read_file():
    # Read the data
df = pd.read_table("/yourfilepath/SMSSpamCollection",
header=None,
names=['label', 'sms_message'])
    # Map the labels: 0 means 'ham', 1 means 'spam'
df['label'] = df.label.map({'ham': 0, 'spam': 1})
return df
def train_and_test_data(df_data):
    # Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(df_data['sms_message'],
df_data['label'],
random_state=1)
    # Create a CountVectorizer instance
    count_vector = CountVectorizer()
    # Turn the training data into a term-count matrix
    training_data = count_vector.fit_transform(X_train)
    # Transform the test set
    testing_data = count_vector.transform(X_test)
    naive_bayes = MultinomialNB()
    # Apply the naive Bayes algorithm
    naive_bayes.fit(training_data, y_train)
    # Predict on the test data
    predictions = naive_bayes.predict(testing_data)
return predictions, X_train, X_test, y_train, y_test
def evaluate_model(predictions, y_test):
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
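# For reference, with TP/FP/FN counted on the 'spam' class:
#   precision = TP / (TP + FP), recall = TP / (TP + FN),
#   F1 = 2 * precision * recall / (precision + recall)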
def print_testing_result(X_test, y_test, predictions):
category_map = {0: 'ham', 1: 'spam'}
for message, category, real in zip(X_test[50:100], predictions[50:100], y_test[50:100]):
        print('\n received message:', message, '\n prediction:', category_map[category], 'true value:',
              category_map[real])
if __name__ == "__main__":
df = read_file()
predictions, _, X_test, _, y_test = train_and_test_data(df)
evaluate_model(predictions, y_test)
print_testing_result(X_test, y_test, predictions)
|
goelo/machine_learning
|
naive_bayes/smsspammessage.py
|
smsspammessage.py
|
py
| 2,190 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "pandas.read_table",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.MultinomialNB",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 41,
"usage_type": "call"
}
] |
11902565567
|
#!/usr/bin/env python3
from pprint import pprint
import subprocess
import pyone
import config
# -----------------------
# Connect to OpenNebula
# -----------------------
one = pyone.OneServer(config.ONE['address'], session='%s:%s' % (config.ONE['username'], config.ONE['password']))
# prepare hosts to ips mapping
hostToIp = {}
hosts = one.hostpool.info()
for host in hosts.HOST:
if host.STATE != 2:
continue
host_name = host.TEMPLATE['HOSTNAME']
hostToIp[host_name] = {}
hostToIp[host_name]['ip'] = host.TEMPLATE['IP_ADDRESS']
hostToIp[host_name]['cgroups'] = host.TEMPLATE['CGROUPS_VERSION']
print('{count} compute nodes found'.format(count=len(hostToIp)))
# get vms
vms = one.vmpool.infoextended(-2, -1, -1, -1)
for vm in vms.VM:
host = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME
hostIp = hostToIp[host]['ip']
hostCgroups = int(hostToIp[host]['cgroups'])
if hostCgroups == 2:
cpuCount = float(vm.TEMPLATE.get('CPU'))
shares = int(100 * cpuCount)
print('Updating VM %s: virsh -c qemu+tcp://%s/system schedinfo %s --set cpu_shares=%s' % (vm.NAME, hostIp, vm.DEPLOY_ID, shares))
subprocess.check_call('virsh -c qemu+tcp://%s/system schedinfo %s --set cpu_shares=%s' % (hostIp, vm.DEPLOY_ID, shares),
shell=True)
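# Example of the mapping above: a VM template with CPU = "2.5" yields
# shares = int(100 * 2.5) = 250, applied via virsh schedinfo.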
|
OpenNebula/addon-3par
|
scripts/helpers/update-cpu-shares.py
|
update-cpu-shares.py
|
py
| 1,335 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pyone.OneServer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.ONE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_call",
"line_number": 41,
"usage_type": "call"
}
] |
17591759593
|
import pathlib
from PIL import Image
class image_to_c_array:
def __init__(self, image_path, output_path, format_bytes_count, char_array_name, include_header_guard=False, include_header_guard_name=None, reset_output_file=True):
self.image_path = image_path
self.output_path = output_path
self.file_bytes = pathlib.Path(image_path).read_bytes()
self.char_array_name = char_array_name
self.format_bytes_count = format_bytes_count
self.include_header_guard_name = include_header_guard_name
self.include_header_guard = include_header_guard
self.reset_output_file = reset_output_file
        self.file_bytes_len = len(self.file_bytes)
def grab_string(self):
if self.include_header_guard:
data = "#ifndef {}\n#define {}\nunsigned char {}[{}] = {{\n\t".format(self.include_header_guard_name, self.include_header_guard_name, self.char_array_name,self.file_bytes_len)
else:
data = "unsigned char {}[{}] = {{\n\t".format(self.char_array_name, self.file_bytes_len)
count = 0
for x in self.file_bytes:
if count == self.format_bytes_count:
data += "\n\t"
count = 0
temp_data = str(hex(x))
if len(temp_data) == 3:
temp_data = temp_data.replace("0x", "0x0")
data += temp_data.upper() + ", "
count += 1
        if self.include_header_guard:  # mirror the opening #ifndef above
data += "\n};\n#endif\n"
else:
data += "\n};\n\n"
return data
def save(self):
if self.reset_output_file:
open(self.output_path, 'w+').close()
with open(self.output_path, 'a+') as f:
f.write(self.grab_string())
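# Illustrative usage (file names and the array name are assumptions):
#
# image_to_c_array('logo.png', 'logo.h', format_bytes_count=12,
#                  char_array_name='logo_png', include_header_guard=True,
#                  include_header_guard_name='LOGO_PNG_H').save()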
|
0xRooted/File-To-C-Array
|
filetocarray.py
|
filetocarray.py
|
py
| 1,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
}
] |
14048408200
|
from io import StringIO
from pathlib import Path
import streamlit as st
import time
from detect import detect
import os
import sys
import argparse
from PIL import Image
import shutil
import streamlit.components.v1 as components
def get_subdirs(b='.'):
'''
Returns all sub-directories in a specific Path
'''
result = []
for d in os.listdir(b):
bd = os.path.join(b, d)
if os.path.isdir(bd):
result.append(bd)
return result
def get_detection_folder():
'''
    Returns the latest folder in runs/detect
'''
return max(get_subdirs(os.path.join('runs', 'detect')), key=os.path.getmtime)
if __name__ == '__main__':
st.title('Bird Identification System ')
table_html = """
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link rel="stylesheet" href="//stackpath.bootstrapcdn.com/bootstrap/4.2.1/css/bootstrap.min.css"
integrity="sha384-GJzZqFGwb1QTTN6wy59ffF1BuGJpLSa9DkKMp0DgiMDm4iYMj70gZWKYbI706tWS" crossorigin="anonymous">
<style>
.bd-placeholder-img {
font-size: 1.125rem;
text-anchor: middle;
}
@media (min-width: 768px) {
.bd-placeholder-img-lg {
font-size: 3.5rem;
}
}
</style>
<link rel="stylesheet" href="/static/style.css">
<title>Bird Identification System</title>
</head>
<body class="text-center">
<form class="form-signin card mb-6" method=post enctype=multipart/form-data>
<img class="mb-4" src="https://ts1.cn.mm.bing.net/th/id/R-C.93dc7e23a93c7b1b1d23361ce54692a1?rik=6cirEfWmxE5hyQ&riu=http%3a%2f%2fpic.bizhi360.com%2fbbpic%2f0%2f4300.jpg&ehk=kJ5JAQiiwI2BtUKwuLsGoUzUtUagshyomug1aDlAc3A%3d&risl=&pid=ImgRaw&r=0" alt="" width="150"
style="border-radius:50%">
<h1 class="h3 mb-3 font-weight-normal">Upload Any Bird Image or Video</h1>
<br />
<button>
<span class="box">
                Welcome!
</span>
</button>
<p class="mt-5 mb-3 text-muted">Built using Streamlit and Pytorch</p>
</body>
<!-- Github Ribbon Start-->
<a href="https://github.com" class="github-corner"><svg width="160" height="160"
viewBox="0 0 250 250" style="fill:#0E2E3B; color:#FFFFFF; position: absolute; top: 0; border: 0; right: 0;">
<path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path>
<path
d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2"
fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path>
<path
d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z"
fill="currentColor" class="octo-body"></path>
</svg></a>
<style>
.github-corner:hover .octo-arm {
animation: octocat-wave 560ms ease-in-out
}
@keyframes octocat-wave {
0%,
100% {
transform: rotate(0)
}
20%,
60% {
transform: rotate(-25deg)
}
40%,
80% {
transform: rotate(10deg)
}
}
@media (max-width:500px) {
.github-corner:hover .octo-arm {
animation: none
}
.github-corner .octo-arm {
animation: octocat-wave 560ms ease-in-out
}
}
</style>
<!-- Github Ribbon End-->
</html>"""
components.html(table_html, height=400, scrolling=True)
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str,
default='best100.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str,
default='100birds/test/', help='source')
parser.add_argument('--img-size', type=int, default=224,
help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float,
default=0.45, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float,
default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='cpu',
help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true',
help='display results')
parser.add_argument('--save-txt', action='store_true',
help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true',
help='save confidences in --save-txt labels')
parser.add_argument('--nosave', action='store_true',
help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int,
help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true',
help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true',
help='augmented inference')
parser.add_argument('--update', action='store_true',
help='update all models')
parser.add_argument('--project', default='runs/detect',
help='save results to project/name')
parser.add_argument('--name', default='exp',
help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true',
help='existing project/name ok, do not increment')
parser.add_argument('--no-trace', action='store_true', help='don`t trace model')
opt = parser.parse_args()
print(opt)
source = ("图片检测", "视频检测", "文件夹检测")
source_index = st.sidebar.selectbox("选择输入", range(
len(source)), format_func=lambda x: source[x])
if source_index == 0:
uploaded_file = st.sidebar.file_uploader(
"上传图片", type=['png', 'jpeg', 'jpg'])
if uploaded_file is not None:
is_valid = True
            with st.spinner(text='Loading resources...'):
st.sidebar.image(uploaded_file)
picture = Image.open(uploaded_file)
picture = picture.save(f'100birds/test/{uploaded_file.name}')
opt.source = f'100birds/test/{uploaded_file.name}'
else:
is_valid = False
elif source_index == 1:
uploaded_file = st.sidebar.file_uploader("上传视频", type=['mp4'])
if uploaded_file is not None:
is_valid = True
            with st.spinner(text='Loading resources...'):
st.sidebar.video(uploaded_file)
with open(os.path.join("100birds", "video", uploaded_file.name), "wb") as f:
f.write(uploaded_file.getbuffer())
opt.source = f'100birds/video/{uploaded_file.name}'
else:
is_valid = False
else:
uploaded_files = st.sidebar.file_uploader("上传文件夹", accept_multiple_files=True)
for uploaded_file in uploaded_files:
if uploaded_file is not None:
is_valid = True
                with st.spinner(text='Loading resources...'):
st.sidebar.image(uploaded_file)
picture = Image.open(uploaded_file)
picture = picture.save(f'100birds/test/{uploaded_file.name}')
opt.source = f'100birds/test/{uploaded_file.name}'
else:
is_valid = False
is_valid = True
if is_valid:
print('valid')
if source_index == 0:
            if st.button('Start detection'):
detect(opt)
with st.spinner(text='Preparing Images'):
for img in os.listdir(get_detection_folder()):
st.image(str(Path(f'{get_detection_folder()}') / img))
st.balloons()
elif source_index == 1:
            if st.button('Start detection'):
detect(opt)
with st.spinner(text='Preparing Video'):
for vid in os.listdir(get_detection_folder()):
st.video(str(Path(f'{get_detection_folder()}') / vid))
st.balloons()
else:
            if st.button('Start detection'):
for dirpath, dirname, filenames in os.walk('100birds/test'):
for filename in filenames:
opt.source = os.path.join(dirpath, filename)
detect(opt)
with st.spinner(text='Preparing file folder'):
for img in os.listdir(get_detection_folder()):
st.image(str(Path(f'{get_detection_folder()}') / img))
# for vid in os.listdir(get_detection_folder()):
# st.video(str(Path(f'{get_detection_folder()}') / vid))
st.balloons()
shutil.rmtree('100birds/test')
os.mkdir('100birds/test')
# streamlit run main.py
|
fengxizxf/yolov-bird
|
main.py
|
main.py
|
py
| 9,736 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "streamlit.title",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "streamlit.components.v1.html",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "streamlit.components.v1",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.file_uploader",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "streamlit.spinner",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.image",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "streamlit.sidebar.file_uploader",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "streamlit.spinner",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.video",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.file_uploader",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "streamlit.spinner",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.image",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "streamlit.button",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "detect.detect",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "streamlit.image",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "streamlit.balloons",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "detect.detect",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "streamlit.video",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "streamlit.balloons",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "detect.detect",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "streamlit.image",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "streamlit.balloons",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 242,
"usage_type": "call"
}
] |
19733029074
|
# -*- coding: utf-8 -*-
import logging
import requests
from bs4 import BeautifulSoup
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('wb')
class Client:
def __init__(self):
self.session = requests.Session()
self.session.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
'Accept-Language': 'ru',
}
def load_page(self):
url = 'https://www.wildberries.ru/catalog/aksessuary/aksessuary-dlya-volos'
res = self.session.get(url=url)
res.raise_for_status()
return res.text
def parse_page(self, text: str):
soup = BeautifulSoup(text, 'lxml')
container = soup.select('div.dtList.i-dtList.j-card-item')
for block in container:
self.parse_block(block=block)
def parse_block(self, block):
logger.info(block)
logger.info('=' * 100)
def run(self):
text = self.load_page()
self.parse_page(text=text)
if __name__ == "__main__":
parser = Client()
parser.run()
|
KogameDev/WildberriesParser
|
main.py
|
main.py
|
py
| 1,056 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 26,
"usage_type": "call"
}
] |
35610337121
|
import cv2
import os
cam = cv2.VideoCapture("video.avi")
values = []
def discriminator(frame):
return frame[0][0][1] != 253
# Read each frame. Use discriminator on each frame to output a zero or one.
while True:
ret, f = cam.read()
if not ret:
break
values.append(
        discriminator(f)
)
ones_and_zeroes = "".join(str(int(x)) for x in values)
byte_string = int(ones_and_zeroes, 2).to_bytes(len(ones_and_zeroes) // 8, byteorder='big')
os.write(1, byte_string)
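# Example of the bits-to-bytes step above:
#   int('01000001', 2).to_bytes(1, byteorder='big') == b'A'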
|
sectalks/sectalks
|
ctf-solutions/LON0x26/bc/vid.py
|
vid.py
|
py
| 492 |
python
|
en
|
code
| 277 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.write",
"line_number": 22,
"usage_type": "call"
}
] |
41087502611
|
import matplotlib.pyplot as plt
import pandas as pd
def main():
    # Load the data from the CSV file
data = pd.read_csv("Parcoursup 2023 - Total.csv", delimiter=";")
    # Extract the required columns
dates = pd.to_datetime(data["Date"], format="%d/%m") # type: ignore
en_attente = data[
"Candidats n'ayant pas encore reçu de proposition ou en attente de place"
]
    # Build the bin edges for the step plot
    x = range(len(dates) + 1)
    y = list(en_attente) + [en_attente.iloc[-1]]  # candidates still waiting
    # Plot the curve (in orange) for candidates still waiting
plt.figure(figsize=(8, 6))
plt.step(x, y, where="post", color="orange", linewidth=2, alpha=0.7)
plt.xlabel("Date")
plt.ylabel(
"Candidats n'ayant pas encore reçu de\nproposition ou en attente de place"
)
plt.title(
"Évolution en fonction du temps du nombre de\ncandidats sans affectation sur Parcoursup en 2023"
)
x_ticks = range(0, len(dates), 3)
x_labels = dates[x_ticks].dt.strftime("%d/%m") # type: ignore
plt.xticks(x_ticks, x_labels, rotation=45)
    # Adjust the x-axis limits to remove the blank space on the left
plt.xlim(0, len(dates) - 1)
plt.ylim(0, 400000)
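    # The twin axis below rescales the counts to a percentage of the 827,271
    # total candidates: 400000 / 827271 * 100 ≈ 48.35 %.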
plt.twinx()
plt.ylim(0, (400000 / 827271) * 100)
plt.ylabel("Pourcentage en fonction du nombre de candidats")
plt.tight_layout()
plt.savefig(
"Évolution en fonction du temps du nombre de candidats sans affectation sur Parcoursup en 2023.svg"
)
print(
"Le graphique a été enregistré dans le fichier 'Évolution en fonction du temps du nombre de candidats sans affectation sur Parcoursup en 2023.svg'."
)
plt.close()
if __name__ == "__main__":
main()
|
Ahhj93/Indicateur-Parcoursup-2023
|
parcoursup_candidats_en_attente.py
|
parcoursup_candidats_en_attente.py
|
py
| 1,820 |
python
|
fr
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.step",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.twinx",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
}
] |
34836265477
|
from django.contrib.contenttypes.models import ContentType
from django_filters import rest_framework as filters
from music_app.models import Artist, Track, Album
from music_app.apps import MusicAppConfig
_content_types_id = {
'artist': ContentType.objects.get(app_label=MusicAppConfig.name, model='artist').id,
'album': ContentType.objects.get(app_label=MusicAppConfig.name, model='album').id,
'track': ContentType.objects.get(app_label=MusicAppConfig.name, model='track').id,
}
class BaseSpotifyFilterSet(filters.FilterSet):
name = filters.CharFilter(
field_name='name',
lookup_expr='icontains',
label='Name',
)
spotify_id = filters.CharFilter(
field_name='spotify_id',
lookup_expr='exact',
label='Spotify ID',
)
spotify_uri = filters.CharFilter(
field_name='spotify_uri',
lookup_expr='exact',
label='Spotify URI',
)
class ArtistFilterSet(BaseSpotifyFilterSet):
class Meta:
model = Artist
fields = []
class TrackFilterSet(BaseSpotifyFilterSet):
class Meta:
model = Track
fields = []
class AlbumFilterSet(BaseSpotifyFilterSet):
class Meta:
model = Album
fields = []
class CommentFilterSet(filters.FilterSet):
creator = filters.NumberFilter(
field_name='creator_id',
lookup_expr='exact',
label='Creator ID',
)
content_type = filters.ChoiceFilter(
choices=[
(_content_types_id['artist'], 'artist'),
(_content_types_id['album'], 'album'),
(_content_types_id['track'], 'track')
],
field_name='content_type',
lookup_expr='exact',
label='Type of model'
)
|
vladyslavtsurkan/django_music_application
|
music_app/api/filters.py
|
filters.py
|
py
| 1,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "music_app.apps.MusicAppConfig.name",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "music_app.apps.MusicAppConfig",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "music_app.apps.MusicAppConfig.name",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "music_app.apps.MusicAppConfig",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "music_app.apps.MusicAppConfig.name",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "music_app.apps.MusicAppConfig",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.FilterSet",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.CharFilter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.CharFilter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.CharFilter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "music_app.models.Artist",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "music_app.models.Track",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "music_app.models.Album",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.FilterSet",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.NumberFilter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.ChoiceFilter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django_filters.rest_framework",
"line_number": 56,
"usage_type": "name"
}
] |
650752867
|
import os
import sys
import json
import unittest
import numpy as np
import luigi
import z5py
import cluster_tools.utils.volume_utils as vu
from sklearn.metrics import adjusted_rand_score
from elf.segmentation.mutex_watershed import mutex_watershed
from elf.segmentation.watershed import apply_size_filter
try:
from ..base import BaseTest
except Exception:
sys.path.append(os.path.join(os.path.split(__file__)[0], ".."))
from base import BaseTest
class TestMws(BaseTest):
input_key = "volumes/affinities"
output_key = "data"
offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
[-2, 0, 0], [0, -3, 0], [0, 0, -3],
[-3, 0, 0], [0, -9, 0], [0, 0, -9],
[-4, 0, 0], [0, -27, 0], [0, 0, -27]]
strides = [4, 12, 12]
def _check_result(self, size_filter):
# load affs and compare
with z5py.File(self.input_path, "r") as f:
ds = f[self.input_key]
ds.n_threads = 4
affs = vu.normalize(ds[:])
shape = affs.shape[1:]
with z5py.File(self.output_path, "r") as f:
res = f[self.output_key][:]
self.assertEqual(res.shape, shape)
exp = mutex_watershed(affs, self.offsets, self.strides)
if size_filter > 0:
exp, _ = apply_size_filter(exp.astype("uint32"), np.max(affs[:3], axis=0), size_filter)
score = adjusted_rand_score(exp.ravel(), res.ravel())
expected_score = 0.1
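        # i.e. require an adjusted Rand score above 0.9 against the reference
        # mutex-watershed segmentation computed above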
self.assertLess(1. - score, expected_score)
def test_mws(self):
from cluster_tools.mutex_watershed import MwsWorkflow
config = MwsWorkflow.get_config()["mws_blocks"]
config["strides"] = self.strides
size_filter = config["size_filter"]
with open(os.path.join(self.config_folder, "mws_blocks.config"), "w") as f:
json.dump(config, f)
task = MwsWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_folder,
max_jobs=self.max_jobs, target=self.target,
input_path=self.input_path, input_key=self.input_key,
output_path=self.output_path, output_key=self.output_key,
offsets=self.offsets)
ret = luigi.build([task], local_scheduler=True)
self.assertTrue(ret)
self._check_result(size_filter)
if __name__ == "__main__":
unittest.main()
|
constantinpape/cluster_tools
|
test/mutex_watershed/test_mws.py
|
test_mws.py
|
py
| 2,409 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "base.BaseTest",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "z5py.File",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils.normalize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "z5py.File",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "elf.segmentation.mutex_watershed.mutex_watershed",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "elf.segmentation.watershed.apply_size_filter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.adjusted_rand_score",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cluster_tools.mutex_watershed.MwsWorkflow.get_config",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cluster_tools.mutex_watershed.MwsWorkflow",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cluster_tools.mutex_watershed.MwsWorkflow",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "luigi.build",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 69,
"usage_type": "call"
}
] |
19243529886
|
import shlex
import django_filters
from django.core.exceptions import FieldError
from django.db.models import Q
# The function and Classes in this file are from https://github.com/nexB/scancode.io/blob/main/scanpipe/filters.py
def parse_query_string_to_lookups(query_string, default_lookup_expr, default_field):
"""Parse a query string and convert it into queryset lookups using Q objects."""
lookups = Q()
terms = shlex.split(query_string)
lookup_types = {
"=": "iexact",
"^": "istartswith",
"$": "iendswith",
"~": "icontains",
">": "gt",
"<": "lt",
}
for term in terms:
lookup_expr = default_lookup_expr
negated = False
if ":" in term:
field_name, search_value = term.split(":", maxsplit=1)
if field_name.endswith(tuple(lookup_types.keys())):
lookup_symbol = field_name[-1]
lookup_expr = lookup_types.get(lookup_symbol)
field_name = field_name[:-1]
if field_name.startswith("-"):
field_name = field_name[1:]
negated = True
else:
search_value = term
field_name = default_field
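        # Q's private _negated keyword inverts the lookup when the field name was prefixed with "-"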
lookups &= Q(**{f"{field_name}__{lookup_expr}": search_value}, _negated=negated)
return lookups
class QuerySearchFilter(django_filters.CharFilter):
"""Add support for complex query syntax in search filter."""
def filter(self, qs, value):
if not value:
return qs
lookups = parse_query_string_to_lookups(
query_string=value,
default_lookup_expr=self.lookup_expr,
default_field=self.field_name,
)
try:
return qs.filter(lookups)
except FieldError:
return qs.none()
class PackageSearchFilter(QuerySearchFilter):
def filter(self, qs, value):
if not value:
return qs
if value.startswith("pkg:"):
return qs.for_package_url(value)
if "://" not in value and ":" in value:
return super().filter(qs, value)
search_fields = ["type", "namespace", "name", "version", "download_url"]
lookups = Q()
for field_names in search_fields:
lookups |= Q(**{f"{field_names}__{self.lookup_expr}": value})
return qs.filter(lookups)
|
nexB/purldb
|
packagedb/filters.py
|
filters.py
|
py
| 2,388 |
python
|
en
|
code
| 23 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Q",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django_filters.CharFilter",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.core.exceptions.FieldError",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 81,
"usage_type": "call"
}
] |
36358232508
|
import hide
headers = hide.headers
TOKEN = hide.TOKEN
tell_token = hide.tell_token
chat_id = hide.chat_id
import http.client
import mimetypes
import ssl
import json
import time
from time import localtime, strftime
from datetime import datetime
import requests
# macOS has an issue with SSL; this fixes it
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
def send_to_telegram(message):
url = 'https://api.telegram.org/bot'+tell_token+'/sendMessage'
for i in chat_id:
data = {'chat_id': i, 'text': message}
try:
requests.post(url, data).json()
print("Message sent to Telegram")
except:
print("message did not send")
print("start")
send_to_telegram("Start - The code has started")
time_for_active_6 = 0
time_for_active_7 = 0
while True:
try:
conn = http.client.HTTPSConnection("apiv4.olarm.co")
payload = ''
conn.request("GET", "/api/v4/devices/2731071d-a487-44e8-bb29-fb9a189f6e72/events?pageLength=40", payload, headers)
res = conn.getresponse()
data = res.read()
my_json = data.decode('utf8').replace("'", '"')
jdata = json.loads(my_json)
for i in jdata['data'][::-1]:
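            # eventNum 6 and 7 are the two garage-door zones (orange and blue car); eventTime looks to be in milliseconds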
if i['eventNum'] == 6 and i['eventState'] == 'active':
time_for_active_6 = i['eventTime']
if i['eventNum'] == 7 and i['eventState'] == 'active':
time_for_active_7 = i['eventTime']
if i['eventNum'] == 6 and i['eventState'] == 'closed':
time_for_active_6 = 0
if i['eventNum'] == 7 and i['eventState'] == 'closed':
time_for_active_7 = 0
        # Function to find the difference between two dates.
def time_between(d1, d2):
d1 = datetime.strptime(d1, "%Y-%m-%d %H:%M:%S")
d2 = datetime.strptime(d2, "%Y-%m-%d %H:%M:%S")
#return d2-d1
return abs((d2 - d1).seconds/60)
if time_for_active_6 != 0:
            time_from_the_garage = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_for_active_6/1000))
            current = strftime("%Y-%m-%d %H:%M:%S", localtime())
            elapsed = time_between(time_from_the_garage, current)
            if elapsed > 10:
                print("The garage (orange car) has been open for longer than", elapsed, "mins")
                send_to_telegram("The door (orange car) has been open for " + str(elapsed) + " mins")
                time.sleep(120)
if time_for_active_7 != 0:
            time_from_the_garage = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_for_active_7/1000))
            current = strftime("%Y-%m-%d %H:%M:%S", localtime())
            elapsed = time_between(time_from_the_garage, current)
            if elapsed > 10:
                print("The garage (blue car) has been open for longer than", elapsed, "mins")
                send_to_telegram("The door (blue car) has been open for " + str(elapsed) + " mins")
                time.sleep(120)
        # finally we wait for 2 mins and then we do the whole process again
time.sleep(120)
except:
print("An error occurred try again")
try:
send_to_telegram("ERROR - Somthing went wrong - check the terminal")
except:
print("push bullet / telei is not working")
time.sleep(10)
|
tomashege/Olarm_zone_check
|
check_zone.py
|
check_zone.py
|
py
| 3,684 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "hide.headers",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "hide.TOKEN",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "hide.tell_token",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "hide.chat_id",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_default_https_context",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "http.client.client.HTTPSConnection",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "http.client.client",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 94,
"usage_type": "call"
}
] |
11299411121
|
from functools import update_wrapper
import logging
from .action import FunctionAction
from .request import Request
from .traject import Traject
from .config import Configurable
from .settings import SettingSectionContainer
from .converter import ConverterRegistry
from .predicate import PredicateRegistry
from .tween import TweenRegistry
from . import generic
from reg import Registry as RegRegistry, CachingKeyLookup
import venusian
from . import compat
from .compat import with_metaclass
from .implicit import set_implicit
from .mount import MountRegistry
from .reify import reify
from .template import TemplateEngineRegistry
COMPONENT_CACHE_SIZE = 5000
ALL_CACHE_SIZE = 5000
FALLBACK_CACHE_SIZE = 5000
class Registry(Configurable, RegRegistry, MountRegistry, PredicateRegistry,
ConverterRegistry, TweenRegistry, TemplateEngineRegistry):
"""A registry holding an application's configuration.
"""
app = None # app this registry belongs to. set later during scanning
def __init__(self, name, bases, testing_config):
self.name = name
bases = [base.registry for base in bases if hasattr(base, 'registry')]
RegRegistry.__init__(self)
MountRegistry.__init__(self)
PredicateRegistry.__init__(self)
Configurable.__init__(self, bases, testing_config)
ConverterRegistry.__init__(self)
TweenRegistry.__init__(self)
TemplateEngineRegistry.__init__(self)
self.settings = SettingSectionContainer()
self.clear()
def actions(self):
yield FunctionAction(self, generic.settings), lambda: self.settings
def clear(self):
"""Clear all registrations in this application.
"""
RegRegistry.clear(self)
MountRegistry.clear(self)
PredicateRegistry.clear(self)
Configurable.clear(self)
ConverterRegistry.clear(self)
TweenRegistry.clear(self)
TemplateEngineRegistry.clear(self)
self.traject = Traject()
@reify
def lookup(self):
return CachingKeyLookup(
self,
COMPONENT_CACHE_SIZE,
ALL_CACHE_SIZE,
FALLBACK_CACHE_SIZE).lookup()
def callback(scanner, name, obj):
obj.registry.app = obj
scanner.config.configurable(obj.registry)
class AppMeta(type):
def __new__(cls, name, bases, d):
testing_config = d.get('testing_config')
d['registry'] = Registry(name, bases, testing_config)
result = super(AppMeta, cls).__new__(cls, name, bases, d)
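        # venusian defers the callback to scan time; the callback links the registry back to its app class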
venusian.attach(result, callback)
return result
class App(with_metaclass(AppMeta)):
"""A Morepath-based application object.
You subclass App to create a morepath application class. You can
then configure this class using Morepath decorator directives.
An application can extend one or more other applications, if
desired, by subclassing them. By subclassing App itself, you get
the base configuration of the Morepath framework itself.
Conflicting configuration within an app is automatically
    rejected. A subclass app cannot conflict with the apps it is
subclassing however; instead configuration is overridden.
You can turn your app class into a WSGI application by instantiating
it. You can then call it with the ``environ`` and ``start_response``
arguments.
"""
testing_config = None
parent = None
"""The parent in which this app was mounted."""
request_class = Request
"""The class of the Request to create. Must be a subclass of
:class:`morepath.Request`.
"""
def __init__(self):
pass
@reify
def lookup(self):
"""Get the :class:`reg.Lookup` for this application.
:returns: a :class:`reg.Lookup` instance.
"""
return self.registry.lookup
def set_implicit(self):
set_implicit(self.lookup)
@reify
def traject(self):
return self.registry.traject
def request(self, environ):
"""Create a :class:`Request` given WSGI environment for this app.
:param environ: WSGI environment
:returns: :class:`morepath.Request` instance
"""
return self.request_class(environ, self)
def __call__(self, environ, start_response):
"""This app as a WSGI application.
"""
request = self.request(environ)
response = self.publish(request)
return response(environ, start_response)
def ancestors(self):
"""Return iterable of all ancestors of this app.
Includes this app itself as the first ancestor, all the way
up to the root app in the mount chain.
"""
app = self
while app is not None:
yield app
app = app.parent
@reify
def root(self):
"""The root application.
"""
return list(self.ancestors())[-1]
def child(self, app, **variables):
"""Get app mounted in this app.
Either give it an instance of the app class as the first
parameter, or the app class itself (or name under which it was
mounted) as the first parameter and as ``variables`` the
parameters that go to its ``mount`` function.
Returns the mounted application object, with its ``parent``
attribute set to this app object, or ``None`` if this
application cannot be mounted in this one.
"""
if isinstance(app, App):
result = app
# XXX assert that variables is empty
# XXX do we need to deal with subclasses of apps?
if app.__class__ not in self.registry.mounted:
return None
else:
if isinstance(app, compat.string_types):
factory = self.registry.named_mounted.get(app)
else:
factory = self.registry.mounted.get(app)
if factory is None:
return None
result = factory(**variables)
result.parent = self
return result
def sibling(self, app, **variables):
"""Get app mounted next to this app.
Either give it an instance of the app class as the first
parameter, or the app class itself (or name under which it was
mounted) as the first parameter and as ``variables`` the
parameters that go to its ``mount`` function.
Returns the mounted application object, with its ``parent``
attribute set to the same parent as this one, or ``None`` if such
a sibling application does not exist.
"""
parent = self.parent
if parent is None:
return None
return parent.child(app, **variables)
@reify
def publish(self):
# XXX import cycles...
from .publish import publish
result = publish
for tween_factory in reversed(self.registry.sorted_tween_factories()):
result = tween_factory(self, result)
return result
@classmethod
def directive(cls, name):
"""Decorator to register a new directive with this application class.
You use this as a class decorator for a :class:`morepath.Directive`
subclass::
@App.directive('my_directive')
class FooDirective(morepath.Directive):
...
This needs to be executed *before* the directive is being used
and thus might introduce import dependency issues unlike
normal Morepath configuration, so beware! An easy way to make
sure that all directives are installed before you use them is
to make sure you define them in the same module as where you
define the application class that has them.
"""
return DirectiveDirective(cls, name)
@classmethod
def dotted_name(cls):
return '%s.%s' % (cls.__module__, cls.__name__)
class DirectiveDirective(object):
def __init__(self, cls, name):
self.cls = cls
self.name = name
def __call__(self, directive):
directive_name = self.name
def method(self, *args, **kw):
result = directive(self, *args, **kw)
result.directive_name = directive_name
result.argument_info = args, kw
result.logger = logging.getLogger('morepath.directive.%s' %
directive_name)
return result
# this is to help morepath.sphinxext to do the right thing
method.actual_directive = directive
update_wrapper(method, directive.__init__)
setattr(self.cls, self.name, classmethod(method))
return directive
|
magnus-lycka/morepath
|
morepath/app.py
|
app.py
|
py
| 8,665 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "config.Configurable",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "reg.Registry",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "mount.MountRegistry",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "predicate.PredicateRegistry",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "converter.ConverterRegistry",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "tween.TweenRegistry",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "template.TemplateEngineRegistry",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "reg.Registry.__init__",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "reg.Registry",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "mount.MountRegistry.__init__",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "mount.MountRegistry",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "predicate.PredicateRegistry.__init__",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "predicate.PredicateRegistry",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "config.Configurable.__init__",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "config.Configurable",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "converter.ConverterRegistry.__init__",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "converter.ConverterRegistry",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "tween.TweenRegistry.__init__",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tween.TweenRegistry",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "template.TemplateEngineRegistry.__init__",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "template.TemplateEngineRegistry",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "settings.SettingSectionContainer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "action.FunctionAction",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "reg.Registry.clear",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "reg.Registry",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "mount.MountRegistry.clear",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "mount.MountRegistry",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "predicate.PredicateRegistry.clear",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "predicate.PredicateRegistry",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "config.Configurable.clear",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "config.Configurable",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "converter.ConverterRegistry.clear",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "converter.ConverterRegistry",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "tween.TweenRegistry.clear",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tween.TweenRegistry",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "template.TemplateEngineRegistry.clear",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "template.TemplateEngineRegistry",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "traject.Traject",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "reg.CachingKeyLookup",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "reify.reify",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "venusian.attach",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "compat.with_metaclass",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "request.Request",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "reify.reify",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "implicit.set_implicit",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "reify.reify",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "reify.reify",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "compat.string_types",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "publish.publish",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "reify.reify",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "functools.update_wrapper",
"line_number": 261,
"usage_type": "call"
}
] |
30578979945
|
import os
import json
from datetime import date
from flask import Flask, g, jsonify, request, abort
from flask_cors import CORS #comment this on deployment
from db.jfl_db import Database
app = Flask(__name__)
CORS(app) #comment this on deployment
def get_db():
'''
Returns the document indexing object. Initializes a new Index object if one doesn't exist
'''
db = getattr(g, '_db', None)
if db is None:
db = Database(
host="localhost",
user="jeff",
password="password",
database="jfl"
)
return db
def validate_current_week_request(params):
if params.get('year') is None:
print("INFO: no 'year' attribute found in rewquest. Adding the current year")
params['year'] = date.today().year
def generate_current_week_response(args):
'''
Returns a JSON response with the current week
'''
response = get_db().get_current_week(args['year'])
return jsonify({"current_week": response})
def validate_teams_playing_request(params):
errors = []
if params.get('week') is None:
print("ERROR: no 'week' attribute found in request.")
errors.append("No 'week' attribute found in request.")
if params.get('year') is None:
print("INFO: no 'year' attribute found in rewquest. Adding the current year")
params['year'] = date.today().year
if len(errors) > 0:
error_message = '\n' + '\n\t'.join(errors)
abort(400, f"Invalid Request: {error_message}")
def generate_teams_playing_response(args):
'''
Returns a JSON response with the teams playing for the week
'''
response = get_db().get_teams_playing(args['week'], args['year'])
return jsonify(response)
def validate_draft_status_request(params):
errors = []
if params.get('week') is None:
print("ERROR: no 'week' attribute found in request.")
errors.append("No 'week' attribute found in request.")
if params.get('year') is None:
print("INFO: no 'year' attribute found in rewquest. Adding the current year")
params['year'] = date.today().year
if len(errors) > 0:
error_message = '\n' + '\n\t'.join(errors)
abort(400, f"Invalid Request: {error_message}")
def generate_draft_status_response(args):
'''
Returns a JSON response with the teams playing for the week
'''
response = get_db().get_current_picks(args['week'], args['year'])
return jsonify(response)
def validate_pick_team_request(params):
errors = []
if params.get('user_id') is None:
print("ERROR: no 'user_id' attribute found in rewquest.")
errors.append("No 'user_id' attribute found in request.")
if params.get('week') is None:
print("ERROR: no 'week' attribute found in request.")
errors.append("No 'week' attribute found in request.")
if params.get('pick') is None:
print("ERROR: no 'pick' attribute found in request.")
errors.append("No 'pick' attribute found in request.")
if params.get('team') is None:
print("ERROR: no 'team' attribute found in request.")
errors.append("No 'team' attribute found in request.")
if params.get('year') is None:
print("INFO: no 'year' attribute found in request. Adding the current year")
params['year'] = date.today().year
if len(errors) > 0:
error_message = '\n' + '\n\t'.join(errors)
abort(400, f"Invalid Request: {error_message}")
def generate_pick_team_response(args):
'''
Runs the pick_teams db call
Returns a JSON successful message upon completion
'''
get_db().select_team(args['user_id'], args['week'], args['pick'], args['team'], args['year'])
return jsonify({"success": True})
def validate_season_picks_request(params):
if params.get('year') is None:
print("INFO: no 'year' attribute found in request. Adding the current year")
params['year'] = date.today().year
def generate_season_picks_response(args):
'''
Returns a JSON response with the teams selected by week & user/player
'''
response = get_db().get_season_selections(args['year'])
return jsonify(response)
def validate_standings_request(params):
if params.get('year') is None:
print("INFO: no 'year' attribute found in request. Adding the current year")
params['year'] = date.today().year
def generate_standings_response(args):
'''
Returns a JSON response with NFL team information
'''
response = get_db().get_standings(args['year'])
return jsonify(response)
def validate_team_data_request(params):
if params.get('team_id') is None:
print("ERROR: no 'team_id' attribute found in request.")
abort(400, f"Invalid Request: no 'team_id' attribute found in request.")
def generate_team_data_response(args):
'''
Returns a JSON response with NFL team information
'''
response = get_db().get_team_info(args['team_id'])
return jsonify(response)
def validate_user_data_request(params):
if params.get('user_id') is None:
print("ERROR: no 'user_id' attribute found in request.")
abort(400, f"Invalid Request: no 'user_id' attribute found in request.")
def generate_user_data_response(args):
'''
Returns a JSON response with user information
'''
response = get_db().get_user_info(args['user_id'])
return jsonify(response)
def validate_complete_week_request(params):
errors = []
if params.get('week') is None:
print("ERROR: no 'week' attribute found in request.")
abort(400, f"Invalid Request: no 'week' attribute found in request.")
if params.get('year') is None:
print("INFO: no 'year' attribute found in request. Adding the current year")
params['year'] = date.today().year
if len(errors) > 0:
error_message = '\n' + '\n\t'.join(errors)
abort(400, f"Invalid Request: {error_message}")
def generate_complete_week_response(args):
'''
Returns a JSON response verifying the week was completed
'''
response = get_db().complete_week(args['week'], args['year'])
return jsonify({"success": True})
def validate_reset_week_request(params):
errors = []
if params.get('week') is None:
print("ERROR: no 'week' attribute found in request.")
abort(400, f"Invalid Request: no 'week' attribute found in request.")
if params.get('year') is None:
print("INFO: no 'year' attribute found in request. Adding the current year")
params['year'] = date.today().year
if len(errors) > 0:
error_message = '\n' + '\n\t'.join(errors)
abort(400, f"Invalid Request: {error_message}")
def generate_reset_week_response(args):
'''
Returns a JSON response verifying the draft picks were reset
'''
response = get_db().reset_picks(args['week'], args['year'])
return jsonify({"success": True})
def validate_sim_games_request(params):
errors = []
if params.get('week') is None:
print("ERROR: no 'week' attribute found in request.")
abort(400, f"Invalid Request: no 'user_id' attribute found in request.")
if params.get('year') is None:
print("INFO: no 'year' attribute found in request. Adding the current year")
params['year'] = date.today().year
if len(errors) > 0:
error_message = '\n' + '\n\t'.join(errors)
abort(400, f"Invalid Request: {error_message}")
def generate_sim_games_response(args):
'''
Returns a JSON response verifying the games were simulated for the week
'''
response = get_db().sim_week(args['week'], args['year'])
return jsonify({"success": True})
@app.route('/api/current_week', methods=['GET'])
def api_current_week():
'''
Route for the API to get the current week
'''
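    # request.args is immutable; copy it into a plain dict so the validator can inject defaults (e.g. the current year)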
request_args = dict(request.args)
validate_current_week_request(request_args)
return generate_current_week_response(request_args)
@app.route('/api/teams_playing', methods=['GET'])
def api_get_teams_playing():
'''
Route for the API to get the teams playing
'''
request_args = dict(request.args)
validate_teams_playing_request(request_args)
return generate_teams_playing_response(request_args)
@app.route('/api/draft_status', methods=['GET'])
def api_draft_status():
'''
Route for the API to get the draft status
'''
request_args = dict(request.args)
validate_draft_status_request(request_args)
return generate_draft_status_response(request_args)
@app.route('/api/pick_team', methods=['POST'])
def api_pick_team():
'''
Route for the API to make a draft selection
'''
request_data = json.loads(request.data)
validate_pick_team_request(request_data)
return generate_pick_team_response(request_data)
@app.route('/api/season_selections', methods=['GET'])
def api_season_picks():
'''
Route for the API to get the league's picks for the entire season
'''
request_args = dict(request.args)
validate_season_picks_request(request_args)
return generate_season_picks_response(request_args)
@app.route('/api/standings', methods=['GET'])
def api_standings():
'''
Route for the API to get the standings of the league
'''
request_args = dict(request.args)
validate_standings_request(request_args)
return generate_standings_response(request_args)
@app.route('/api/team_data', methods=['GET'])
def api_team_data():
'''
Route for the API to get information about an NFL team
'''
request_args = dict(request.args)
validate_team_data_request(request_args)
return generate_team_data_response(request_args)
@app.route('/api/user_data', methods=['GET'])
def api_user_data():
'''
Route for the API to get information about a user
'''
request_args = dict(request.args)
validate_user_data_request(request_args)
return generate_user_data_response(request_args)
@app.route('/api/complete_week', methods=['POST'])
def api_complete_week():
'''
Route for the API to complete the week and move to the next week
'''
request_data = json.loads(request.data)
validate_complete_week_request(request_data)
return generate_complete_week_response(request_data)
@app.route('/api/reset_week', methods=['POST'])
def api_reset_week():
'''
Route for the API to reset the draft picks for a week
'''
request_data = json.loads(request.data)
validate_reset_week_request(request_data)
return generate_reset_week_response(request_data)
@app.route('/api/sim_games', methods=['POST'])
def api_sim_games():
'''
Route for the API to sim the games for a week
'''
request_data = json.loads(request.data)
validate_sim_games_request(request_data)
return generate_sim_games_response(request_data)
if __name__ == '__main__':
# Run the app
debug = os.environ.get('DEBUG', 'false').lower() == 'true'
app.run(host='0.0.0.0', port=5000, debug=debug)
|
zrahn93/jfl
|
jfl_services/run.py
|
run.py
|
py
| 11,376 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db.jfl_db",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "db.jfl_db",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "db.jfl_db",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "db.jfl_db.Database",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "db.jfl_db",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 353,
"usage_type": "attribute"
}
] |
43242321794
|
# Same as second example, but using F1 (ALM)
import casadi.casadi as cs
import opengen as og
import json
nu = 3
np = 1
u = cs.SX.sym("u", nu)
p = cs.SX.sym("p", np)
f = cs.dot(u, u)
for i in range(nu):
f += p * u[i]
F1 = cs.sin(u[0]) - 0.3
C = og.constraints.Zero()
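# the augmented Lagrangian machinery drives F1(u) into C, i.e. enforces sin(u[0]) = 0.3 at the solution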
U = og.constraints.Ball2(None, 0.5)
problem = og.builder.Problem(u, p, f) \
.with_constraints(U) \
.with_aug_lagrangian_constraints(F1, C)
meta = og.config.OptimizerMeta() \
.with_version("0.0.0") \
.with_authors(["Shane Trimble"]) \
.with_licence("CC4.0-By") \
.with_optimizer_name("shane")
build_config = og.config.BuildConfiguration() \
.with_build_directory("python_build") \
.with_build_mode("debug") \
.with_tcp_interface_config()
solver_config = og.config.SolverConfiguration() \
.with_tolerance(1e-5) \
.with_initial_tolerance(1e-5) \
.with_initial_penalty(10) \
.with_penalty_weight_update_factor(2) \
.with_max_outer_iterations(20)
builder = og.builder.OpEnOptimizerBuilder(problem,
metadata=meta,
build_configuration=build_config,
solver_configuration=solver_config)
builder.build()
mng = og.tcp.OptimizerTcpManager('python_build/shane')
mng.start()
pong = mng.ping() # check if the server is alive
print(pong)
solution = mng.call([1.0]) # call the solver over TCP
print(json.dumps(solution, indent=4, sort_keys=False))
mng.kill()
|
BjoernLindqvist/Crazyflie_NMPC
|
third_example.py
|
third_example.py
|
py
| 1,718 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "casadi.casadi.SX.sym",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "casadi.casadi.SX",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "casadi.casadi",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "casadi.casadi.SX.sym",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "casadi.casadi.SX",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "casadi.casadi",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "casadi.casadi.dot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "casadi.casadi",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "casadi.casadi.sin",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "casadi.casadi",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "opengen.constraints.Zero",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "opengen.constraints",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "opengen.constraints.Ball2",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "opengen.constraints",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "opengen.builder.Problem",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "opengen.builder",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "opengen.config.OptimizerMeta",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "opengen.config",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "opengen.config.BuildConfiguration",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "opengen.config",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "opengen.config.SolverConfiguration",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "opengen.config",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "opengen.builder.OpEnOptimizerBuilder",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "opengen.builder",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "opengen.tcp.OptimizerTcpManager",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "opengen.tcp",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 54,
"usage_type": "call"
}
] |
74348094589
|
from collections import namedtuple, defaultdict
from itertools import combinations, product
from math import sqrt
from typing import List
INPUTTEST = 'inputtest.txt'
INPUTREAL = 'input.txt'
def getLines(fileName):
file = open(fileName,'r')
lines = file.read().splitlines()
file.close()
return lines
class Point3(namedtuple('Point', 'x y z')):
def __repr__(self):
return f'{self.x},{self.y},{self.z}'
# performed x y z rotations in a loop and stored in a set
rotations = [([2, 0, 1], [-1, -1, 1]), ([0, 1, 2], [1, -1, -1]), ([2, 1, 0], [-1, -1, -1]), ([2, 1, 0], [1, -1, 1]),
([0, 2, 1], [-1, -1, -1]), ([1, 2, 0], [1, -1, -1]), ([1, 0, 2], [-1, -1, -1]), ([1, 2, 0], [1, 1, 1]),
([0, 2, 1], [-1, 1, 1]), ([0, 1, 2], [-1, 1, -1]), ([0, 2, 1], [1, -1, 1]), ([2, 0, 1], [-1, 1, -1]),
([1, 0, 2], [1, 1, -1]), ([2, 1, 0], [1, 1, -1]), ([2, 0, 1], [1, 1, 1]), ([2, 1, 0], [-1, 1, 1]),
([0, 1, 2], [1, 1, 1]), ([1, 0, 2], [1, -1, 1]), ([1, 0, 2], [-1, 1, 1]), ([0, 1, 2], [-1, -1, 1]),
([1, 2, 0], [-1, 1, -1]), ([1, 2, 0], [-1, -1, 1]), ([0, 2, 1], [1, 1, -1]), ([2, 0, 1], [1, -1, -1])]
def formatInput(lines):
scanners = {}
scannerCurrent = []
i = 0
lines.append("")
for line in filter(lambda x: not x.startswith("---"), lines):
if len(line) == 0 and len(scannerCurrent) > 0:
scanners[i] = scannerCurrent
scannerCurrent = []
i += 1
else:
scannerCurrent.append([int(t) for t in line.split(",")])
return scanners
def partOne(scanners):
intersects = getIntersects(scanners)
mapping_dict = createMappings(intersects, scanners)
beacons = set(toPoint(p) for p in scanners[0])
used_mappings = set()
transformed_scanners = {0}
scanner_origins = [[0, 0, 0]]
while len(transformed_scanners) < len(scanners):
queue = [k for k in mapping_dict.keys() if k[0] in transformed_scanners and k[1] not in transformed_scanners]
while len(queue) > 0:
el = queue.pop()
if el[1] in transformed_scanners:
continue
p_transpose = list(zip(*scanners[el[1]]))
centroid = list(zip([0, 0, 0])) # origin relative to scanner itself is 0, 0, 0
use_mapping = el
while True:
centroid = transform(centroid, *mapping_dict[use_mapping])
p_transpose = transform(p_transpose, *mapping_dict[use_mapping])
new_points = set(toPoint(p) for p in zip(*p_transpose))
if use_mapping[0] == 0:
break
for mapping in used_mappings:
if mapping[1] == use_mapping[0]:
use_mapping = mapping
break
scanner_origins.append([centroid[0][0], centroid[1][0], centroid[2][0]])
transformed_scanners.add(el[1])
beacons.update(new_points)
used_mappings.add(el)
return len(beacons), scanner_origins
def partTwo(scanner_origins):
return max(sum(map(lambda x: abs(x[0] - x[1]), zip(*p))) for p in combinations(scanner_origins, 2))
def createMappings(intersects, scanners):
mappingDict = {}
for i in intersects:
pointToDistA = defaultdict(set)
for p in combinations(scanners[i[0]], 2):
dist = euclidDist(*p)
pointToDistA[toPoint(p[0])].add(dist)
pointToDistA[toPoint(p[1])].add(dist)
pointToDistB = defaultdict(set)
for p in combinations(scanners[i[1]], 2):
dist = euclidDist(*p)
pointToDistB[toPoint(p[0])].add(dist)
pointToDistB[toPoint(p[1])].add(dist)
pointsA = []
pointsB = []
for p in product(pointToDistA.keys(), pointToDistB.keys()):
intersect = pointToDistA[p[0]].intersection(pointToDistB[p[1]])
if len(intersect) >= 11: # 12 common beacons 1 src and 11 dst for distance
pointsA.append(pointToList(p[0]))
pointsB.append(pointToList(p[1]))
mappingDict[i] = mapScannerAToB(pointsA, pointsB)
return mappingDict
def mapScannerAToB(pointsA, pointsB):
aTranspose = list(zip(*pointsA))
bTranspose = list(zip(*pointsB))
for perms, signs in rotations:
rotated = rotate(bTranspose, perms, signs)
offset = []
for p in zip(rotated, aTranspose):
points = set([x[1] - x[0] for x in zip(p[0], p[1])])
if len(points) == 1:
offset.append(points.pop())
if len(offset) == 3:
return offset, perms, signs
return None
def transform(itemToTransform, centerOfTarget, transformPerm, transformSign):
rotated = rotate(itemToTransform, transformPerm, transformSign)
return [list(map(lambda x: centerOfTarget[i] + x, p)) for i, p in enumerate(rotated)]
def getIntersects(scanners):
intersections = []
distDict = {i: set(euclidDist(*p) for p in combinations(scanners[i], 2)) for i in scanners.keys()}
for i in combinations(range(len(scanners)), 2):
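        # 66 = C(12, 2): twelve shared beacons yield 66 matching pairwise distances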
if len(distDict[i[0]].intersection(distDict[i[1]])) >= 66:
intersections.append(i)
intersections.append((i[1], i[0]))
return intersections
def rotate(point, perms, signs):
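    # apply an axis permutation and per-axis sign flips, one of the 24 proper rotations tabulated above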
return map(lambda n: n * signs[0], point[perms[0]]), \
map(lambda n: n * signs[1], point[perms[1]]), \
map(lambda n: n * signs[2], point[perms[2]])
def euclidDist(a, b):
return sqrt(sum(map(lambda x: pow(x[0] - x[1], 2), zip(a, b))))
def toPoint(plist):
if len(plist) == 3:
return Point3(plist[0], plist[1], plist[2])
else:
raise Exception("Can't cover to point")
def pointToList(p):
return [p.x, p.y, p.z]
lines = getLines(INPUTREAL)
scanners = formatInput(lines)
part1, centroids = partOne(scanners)
'''Part One'''
print(part1)
'''Part Two'''
print(partTwo(centroids))
|
David-Hatcher/AoC2021
|
Day 19/Day19.py
|
Day19.py
|
py
| 5,987 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 142,
"usage_type": "call"
}
] |
15821882121
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from audio_common_msgs.msg import AudioData
import os
import argparse
import pyaudio
import wave
import datetime
class AudioCapture:
def __init__(
self,
is_record_topic,
audio_data_topic,
num_channels,
sample_rate,
chunk_size,
format_size,
file_name_prefix='',
out_file_directory='audio',
):
rospy.init_node('audio_recorder', anonymous=True)
self._is_record_subscriber = rospy.Subscriber(is_record_topic, Bool, self._record_callback, queue_size=1)
self._audio_data_topic = rospy.Subscriber(audio_data_topic, AudioData, self._audio_data_callback, queue_size=1)
self._num_channels = num_channels
self._sample_rate = sample_rate
self._chunk_size = chunk_size
self._format_size = format_size
self._file_name_prefix = file_name_prefix
if len(self._file_name_prefix) > 0:
self._file_name_prefix += '_'
rospy.loginfo(self._file_name_prefix)
self._out_directory = out_file_directory
self._start_record_datetime = None
self._audio_data = None
def _record_callback(self, data):
is_record = data.data
self._record(is_record)
def _record(self, is_record):
if is_record:
if self._is_recording():
rospy.logerr("Already recording audio")
else:
rospy.loginfo("Starting to record audio")
self._audio_data = []
self._start_record_datetime = datetime.datetime.now()
else:
if self._is_recording():
rospy.loginfo("Stopped recording audio")
self._save_recording(self._audio_data)
# Clean up
self._audio_data = None
self._start_record_datetime = None
else:
rospy.logerr("No recording in progress")
def _audio_data_callback(self, data):
if self._is_recording():
self._audio_data.append(data.data)
def _is_recording(self):
return self._start_record_datetime is not None
def _save_recording(self, audio_data):
if not os.path.exists(self._out_directory):
os.makedirs(self._out_directory)
file_name = "{prefix}{date_str}.{ext}".format(
prefix=self._file_name_prefix,
date_str=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
ext='wav'
)
file_path = os.path.join(self._out_directory, file_name)
wf = wave.open(file_path, 'wb')
wf.setnchannels(self._num_channels)
wf.setsampwidth(pyaudio.PyAudio().get_sample_size(self._format_size))
wf.setframerate(self._sample_rate)
wf.setnframes(self._chunk_size)
wf.writeframes(b''.join(audio_data))
wf.close()
if __name__ == "__main__":
# Getting the instance_id for the parameters
parser = argparse.ArgumentParser(description='instance_id for audio recording')
parser.add_argument('--instance_id', help='instance_id for parameters namespace', default="1")
args, _ = parser.parse_known_args()
# Getting the values as params
is_record_topic = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/is_record_topic", "audio_capture/is_record")
audio_topic = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/audio_topic", "audio/audio")
output_directory = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/output_directory", "/root/audio")
num_channels = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/num_channels", 1)
sample_rate = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/sample_rate", 16000)
chunk_size = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/chunk_size", 1024)
format_type = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/format_type", "wave")
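    # format_size is passed as a string (e.g. "pyaudio.paInt16") and resolved to the pyaudio constant via eval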
format_size = eval(rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/format_size", "pyaudio.paInt16"))
file_name_prefix = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/file_name_prefix", '')
assert format_type == "wave"
AudioCapture(
is_record_topic=is_record_topic,
audio_data_topic=audio_topic,
out_file_directory=output_directory,
num_channels=num_channels,
sample_rate=sample_rate,
chunk_size=chunk_size,
format_size=format_size,
file_name_prefix=file_name_prefix
)
rospy.spin()
|
robotpt/ros-data-capture
|
src/data_capture/audio_capture2/scripts/capture.py
|
capture.py
|
py
| 4,708 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rospy.init_node",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Bool",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscriber",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "audio_common_msgs.msg.AudioData",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "rospy.loginfo",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "rospy.logerr",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "rospy.loginfo",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "rospy.logerr",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "wave.open",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "rospy.spin",
"line_number": 131,
"usage_type": "call"
}
] |
27757527715
|
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import util as u
def reset_param(t):
stdv = 2. / math.sqrt(t.size(0))
t.data.uniform_(-stdv,stdv)
class GCN_LSTM(nn.Module):
def __init__(self, args, activation, device='cpu'):
super().__init__()
self.lstm=nn.LSTM(
input_size=args.layer_2_feats,
hidden_size=args.lstm_feats,
num_layers=args.num_lstm_layers
)
# self.lstm = nn.GRU(
# input_size=args.layer_2_feats,
# hidden_size=args.lstm_l2_feats,
# num_layers=args.lstm_l2_layers
# )
self.device=device
self.activation=activation
self.num_layers=args.num_gcn_layers
self.choose_top_k=TopK(args.layer_2_feats, args.k)
self.w_list=nn.ParameterList()
for i in range(self.num_layers):
if i==0:
w_i=Parameter(torch.Tensor(args.feats_per_node, args.layer_1_feats))
reset_param(w_i)
else:
w_i=Parameter(torch.Tensor(args.layer_1_feats, args.layer_2_feats))
reset_param(w_i)
self.w_list.append(w_i)
def forward(self, A_list, node_feats, mask_list):
last_l_seq=[]
for t, Ahat in enumerate(A_list):
idx=mask_list[t]
Ahat, x=Ahat.to(self.device), node_feats.to(self.device)
x=x.matmul(self.w_list[0])
x[idx]=self.activation(Ahat.matmul(x[idx]))
for i in range(1, self.num_layers):
x=x.matmul(self.w_list[i])
x[idx]=self.activation(Ahat.matmul(x[idx]))
last_l_seq.append(x)
last_l_seq=torch.stack(last_l_seq)
out, _=self.lstm(last_l_seq, None)
return out[-1]
class TopK(torch.nn.Module):
def __init__(self,feats,k):
super().__init__()
self.scorer = Parameter(torch.Tensor(feats,1))
self.reset_param(self.scorer)
self.k = k
def reset_param(self,t):
#Initialize based on the number of rows
stdv = 1. / math.sqrt(t.size(0))
t.data.uniform_(-stdv,stdv)
def forward(self,node_embs):
scores = node_embs.matmul(self.scorer) / self.scorer.norm()
ll=node_embs.shape[0]
tanh = torch.nn.Tanh()
out=node_embs * tanh(scores.view(-1,1))
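        # with fewer than k nodes, pad by repeating the last scaled embedding until length k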
if ll<self.k:
t=node_embs[-1] * tanh(scores[-1])
t=t.unsqueeze(0).repeat(self.k-ll,1)
out =torch.cat([out, t], 0)
out=out[:self.k]
        # return the top-k scaled embeddings with shape (k, feats); no transpose is applied here
return out
|
sunny77889/DyGCN
|
compare_models/GCN_LSTM/gcn_lstm.py
|
gcn_lstm.py
|
py
| 2,679 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "math.sqrt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.LSTM",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.ParameterList",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.parameter.Parameter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn.parameter.Parameter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.parameter.Parameter",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 78,
"usage_type": "call"
}
] |
23837831992
|
'''
Script for building and visualization of v(x) function for fixed x0 and gamma
(OSCILLATING case of eigenfunction)
'''
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import utils
x0 = 0.41
gamma = 6.0
AFTER_TANGENT = False
SUFFIX_NAME = '_after_tangent' if AFTER_TANGENT else ''
CSV_FILE = f'../Tracer/Results/x0={x0:.2f}/x0={x0:.2f}_analytical' + SUFFIX_NAME + '.csv'
def get_omega(g):
'''
Return omega(gamma) value
:param g: gamma value
:return: omega(gamma) value
'''
df = pd.read_csv(CSV_FILE, sep=';')
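    # select the row whose gamma matches g within an EPS tolerance (avoids exact float comparison)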
return df.loc[(df['gamma'] < g+utils.EPS) & (df['gamma'] > g-utils.EPS)]['w'].values[0]
def get_v_func_val(g, w, x):
'''
Calculate and return value of v(x) for fixed gamma
:param g: fixed gamma value
:param w: fixed omega value
:param x: x value
:return: v(x) for fixed gamma
'''
if g <= 0:
mu = np.sqrt(-g + w*1.0j)
return np.cosh(mu*x)
else:
mu = np.sqrt(g + w*1.0j)
return np.cos(mu*x)
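# For reference: the two branches above implement v(x) = cosh(mu*x) with
# mu = sqrt(-gamma + i*omega) when gamma <= 0, and v(x) = cos(mu*x) with
# mu = sqrt(gamma + i*omega) when gamma > 0.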
def draw_v_func_components(xs, vs):
'''
Draw two plots: the Re(v(x)) and Im(v(x)) dependencies on x
:param xs: x values
:param vs: v(x) values
'''
f = plt.figure(figsize=(10, 4))
f.canvas.set_window_title('x0={:.2}__g={:.4}'.format(x0, gamma))
f.subplots_adjust(left=0.07, bottom=0.1, right=0.97, top=0.97, hspace=0.5)
ax1 = f.add_subplot(121)
ax2 = f.add_subplot(122)
ax1.set_xlabel('x')
ax1.set_ylabel('Re v', rotation='horizontal', position=(0.0, 0.53))
ax1.grid()
ys = [v.real for v in vs]
ax1.plot(xs, ys, color='seagreen', linewidth=2, zorder=3)
ax1.axhline(y=0.0, linewidth=2, color='grey', zorder=2)
ax1.axvline(x=0.0, linewidth=2, color='grey', zorder=2)
ax2.set_xlabel('x')
ax2.set_ylabel('Im v', rotation='horizontal', position=(0.0, 0.53))
ax2.grid()
ys = [v.imag for v in vs]
ax2.plot(xs, ys, color='peru', linewidth=2, zorder=3)
ax2.axhline(y=0.0, linewidth=2, color='grey', zorder=2)
ax2.axvline(x=0.0, linewidth=2, color='grey', zorder=2)
plt.show()
if __name__ == '__main__':
w = get_omega(gamma)
xs = np.linspace(0, 1, 10000)
vs = [get_v_func_val(gamma, w, x) for x in xs]
draw_v_func_components(xs, vs)
|
leonel11/KaschenkoEquation
|
Scripts/oscillating_draw_v_function.py
|
oscillating_draw_v_function.py
|
py
| 2,269 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.EPS",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.cosh",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 76,
"usage_type": "call"
}
] |
13351154708
|
# -*- coding: utf-8 -*-
#electrical calculator
import math
import cmath
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.offsetbox import AnchoredText
#three phase power calculations
def singlePhaseLoad( powerConsumed, powerFactor, leadLag):
#powerConsumed in kW
#power factor
#leadLag = 0 - lead, 1 - lag
#get the angle from the pf
angle = math.acos(powerFactor)
#change angle depending on leading or lagging pf.
if(leadLag == 1):
angle = -angle
#calculate the apparent power
#S*pf = P
apparentPower = powerConsumed/powerFactor
#get reactive Power
reactivePower = apparentPower*math.sin(angle)
#display values
print("Angle is: {} rad / {} degrees".format(angle, math.degrees(angle)))
print("Apparent Power: {} VA".format(apparentPower))
print("Reactive Power is: {} VAR".format(reactivePower))
def threePhaseLoad( powerConsumed, powerFactor, leadLag, voltagel2l):
"""
Parameters: powerConsumed - power consumed by the 3-phase load in W
powerFactor - pf
leadLag - leading pf - 0, lagging pf - 1
voltagel2l - line to line voltage across the 3-phase load
"""
#powerConsumed in kW
#power factor
#leadLag = 0 - lead, 1 - lag
#get the angle from the pf
angle = math.acos(powerFactor)
#change angle depending on leading or lagging pf.
if(leadLag == 1):
angle = -angle
#calculate the apparent power
#S*pf = P
apparentPower = powerConsumed/powerFactor
#get reactive Power
reactivePower = apparentPower*math.sin(angle)
#get the line to line current
currentl2l = apparentPower/(math.sqrt(3)*voltagel2l)
#display values
print("Angle is: {} rad / {} degrees".format(angle, math.degrees(angle)))
print("Apparent Power: {} VA".format(apparentPower))
print("Reactive Power is: {} VAR".format(reactivePower))
print("l2l Current is: {} A".format(currentl2l))
def polarRec(mod, angleDeg):
"""
Converts from polar to rectangular coordinates
return complexNumber
"""
return cmath.rect(mod,math.radians(angleDeg))
def recPolar(complexNumber):
"""
Converts from rectangular to polar coordinates.
return (mod, angle in deg)
"""
ans = cmath.polar(complexNumber)
return (ans[0], math.degrees(ans[1]))
def paraImp(number1, number2):
"""
Calculates the total impedence of 2 impedences in parallel
"""
return (number1*number2)/(number1 + number2)
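# Example (illustrative values): paraImp(3+4j, 3-4j) returns
# ((3+4j)*(3-4j))/6 = 25/6 ~= 4.17 ohm, i.e. a purely resistive equivalent.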
def impBaseConv(voltageRating, powerRating, voltageBase, powerBase, impedenceValue):
"""
Parameters of equipment are given using the power rating of the equipment as the MVA base.
This function converts Z values from old rating to new rating
voltageRating - voltage rating of the equipment
powerRating - powerRating of the equipment
powerBase - the power base of where the equipment is being used
voltageBase - the voltage base of where the equipmment is being used
impedenceValue - the value to be converted between bases
The formula for Zbase = Vbase**2/Sbase
"""
return impedenceValue*((voltageRating**2/powerRating)/(voltageBase**2/powerBase))
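# Worked example (illustrative ratings): a machine rated 11 kV / 50 MVA with
# Z = 0.08 p.u., re-referred to an 11 kV / 100 MVA system base:
# impBaseConv(11e3, 50e6, 11e3, 100e6, 0.08)
# = 0.08 * ((11e3**2/50e6) / (11e3**2/100e6)) = 0.08 * 2 = 0.16 p.u.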
def smibTransCalc(Egen, Vpoc, Xeq, EgenPost, VpocPost, XeqPost):
#pre fault graph
delta = np.arange(0, 3.14, 0.1)
Pe = (abs(Egen)*abs(Vpoc)*np.sin(delta))/abs(Xeq)
#post fault graph
Pepost = (abs(EgenPost)*abs(VpocPost)*np.sin(delta))/abs(XeqPost)
f,ax = plot.subplots(1,1)
ax.plot(delta, Pe) #pre fault
ax.plot(delta, Pepost) #post fault
ax.plot(delta, [ abs(Vpoc) for i in delta]) #mechanical power input
plot.title("Power Curve")
plot.xlabel("Power Angle Delta (rad)")
plot.ylabel("Power (p.u or W)")
plot.grid(True, which='both')
#plot.text("Pe = {}sin(del)").format((abs(Egen)*abs(Vpoc))/abs(Xeq))
anchored_text = AnchoredText("Pe = {}sin(del)".format((abs(Egen)*abs(Vpoc))/abs(Xeq)), loc=2)
ax.add_artist(anchored_text)
d0 = math.asin(Vpoc/((abs(Egen)*abs(Vpoc))/abs(Xeq)))
d1 = math.asin(Vpoc/((abs(EgenPost)*abs(VpocPost))/abs(XeqPost)))
print(d0)
print(d1)
plot.show()
|
vdatl5/electricalCalculator
|
elecCalc.py
|
elecCalc.py
|
py
| 4,282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.acos",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "math.degrees",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "math.acos",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "math.degrees",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cmath.rect",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "cmath.polar",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "math.degrees",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.offsetbox.AnchoredText",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "math.asin",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "math.asin",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 138,
"usage_type": "name"
}
] |
39608443773
|
import datetime
import os
import re
import urllib.parse
from itertools import groupby
from django import forms as django_forms
from django.conf import settings
from django.core.paginator import Paginator, InvalidPage
from django import urls
from django.forms import fields
from django.http import HttpResponse, HttpResponseNotFound, Http404, \
HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import filesizeformat
from django.utils import html
from django.views.decorators.vary import vary_on_headers
from core.utils.url import unpack_url_path
from core import models
from core import solr_index
from core.rdf import title_to_graph, issue_to_graph, page_to_graph
from core.utils.utils import HTMLCalendar, _get_tip, _stream_file, \
_page_range_short, _rdf_base, get_page, label, create_crumbs
from core.decorator import cache_page, rdf_view
@cache_page(settings.DEFAULT_TTL_SECONDS)
def issues(request, year=None):
issues = models.Issue.objects.all().order_by('date_issued')
year_view, select_year_form = _create_year_form(issues, year, True)
page_title = "Browse All Issues"
page_name = "issues"
crumbs = list(settings.BASE_CRUMBS)
return render(request, 'issues.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def issues_title(request, lccn, year=None):
title = get_object_or_404(models.Title, lccn=lccn)
issues = title.issues.all()
year_view, select_year_form = _create_year_form(issues, year, False)
page_title = "Browse Issues: %s" % title.display_name
page_name = "issues_title"
crumbs = create_crumbs(title)
return render(request, 'issues_title.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_holdings(request, lccn):
title = get_object_or_404(models.Title, lccn=lccn)
page_title = "Libraries that Have It: %s" % label(title)
page_name = "holdings"
crumbs = create_crumbs(title)
holdings = title.holdings.select_related('institution').order_by('institution__name')
return render(request, 'holdings.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_marc(request, lccn):
title = get_object_or_404(models.Title, lccn=lccn)
page_title = "MARC Bibliographic Record: %s" % label(title)
page_name = "marc"
crumbs = create_crumbs(title)
return render(request, 'marc.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def title_rdf(request, lccn):
title = get_object_or_404(models.Title, lccn=lccn)
graph = title_to_graph(title)
response = HttpResponse(graph.serialize(base=_rdf_base(request),
include_base=True),
content_type='application/rdf+xml')
return response
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_atom(request, lccn, page_number=1):
title = get_object_or_404(models.Title, lccn=lccn)
issues = title.issues.all().order_by('-batch__created', '-date_issued')
paginator = Paginator(issues, 100)
try:
page = paginator.page(page_number)
except InvalidPage:
raise Http404("No such page %s for title feed" % page_number)
# figure out the time the title was most recently updated
# via the create date of the batch
issues = page.object_list
num_issues = issues.count()
if num_issues > 0:
feed_updated = issues[0].batch.created
else:
feed_updated = title.created
host = request.get_host()
return render(request, 'title.xml', locals(),
content_type='application/atom+xml')
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_marcxml(request, lccn):
title = get_object_or_404(models.Title, lccn=lccn)
return HttpResponse(title.marc.xml, content_type='application/marc+xml')
@cache_page(settings.DEFAULT_TTL_SECONDS)
def issue_pages(request, lccn, date, edition, page_number=1):
title = get_object_or_404(models.Title, lccn=lccn)
_year, _month, _day = date.split("-")
try:
_date = datetime.date(int(_year), int(_month), int(_day))
except ValueError as e:
raise Http404
try:
issue = title.issues.filter(date_issued=_date,
edition=edition).order_by("-created")[0]
except IndexError as e:
raise Http404
issue_pages = []
for page in issue.pages.all():
# include both issue and page because of how metadata
# is being pulled in the template
issue_pages.append({'issue': issue, 'page': page})
paginator = Paginator(issue_pages, 20)
try:
page = paginator.page(page_number)
except InvalidPage:
page = paginator.page(1)
page_range_short = list(_page_range_short(paginator, page))
# set page number variables
if page.has_previous():
previous_page_number = int(page_number) - 1
if page.has_next():
next_page_number = int(page_number) + 1
if not page.object_list:
notes = issue.notes.filter(type="noteAboutReproduction")
num_notes = notes.count()
if num_notes >= 1:
display_label = notes[0].label
explanation = notes[0].text
page_title = 'All Pages: %s, %s' % (label(title), label(issue))
page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
page_head_subheading = label(title)
crumbs = create_crumbs(title, issue, date, edition)
response = render(request, 'issue_pages.html', locals())
return response
@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def issue_pages_rdf(request, lccn, date, edition):
title, issue, page = _get_tip(lccn, date, edition)
graph = issue_to_graph(issue)
response = HttpResponse(graph.serialize(base=_rdf_base(request),
include_base=True),
content_type='application/rdf+xml')
return response
@cache_page(settings.DEFAULT_TTL_SECONDS)
@vary_on_headers('Referer')
def page(request, lccn, date, edition, sequence, words=None):
fragments = []
if words:
fragments.append("words=" + words)
qs = request.META.get('QUERY_STRING')
if qs:
fragments.append(qs)
if fragments:
path_parts = dict(lccn=lccn, date=date, edition=edition,
sequence=sequence)
url = urls.reverse('openoni_page',
kwargs=path_parts)
return HttpResponseRedirect(url + "#" + "&".join(fragments))
title, issue, page = _get_tip(lccn, date, edition, sequence)
if not page.jp2_filename:
notes = page.notes.filter(type="noteAboutReproduction")
num_notes = notes.count()
if num_notes >= 1:
explanation = notes[0].text
else:
explanation = ""
# if no word highlights were requested, see if the user came
# from search engine results and attempt to highlight words from their
# query by redirecting to a url that has the highlighted words in it
if not words:
try:
words = _search_engine_words(request)
words = '+'.join(words)
if len(words) > 0:
path_parts = dict(lccn=lccn, date=date, edition=edition,
sequence=sequence, words=words)
url = urls.reverse('openoni_page_words',
kwargs=path_parts)
return HttpResponseRedirect(url)
except Exception as e:
if settings.DEBUG:
raise e
# else squish the exception so the page will still get
# served up minus the highlights
# Calculate the previous_issue_first_page. Note: it was decided
# that we want to skip over issues with missing pages. See ticket
# #383.
_issue = issue
while True:
previous_issue_first_page = None
_issue = _issue.previous
if not _issue:
break
previous_issue_first_page = _issue.first_page
if previous_issue_first_page:
break
# do the same as above but for next_issue this time.
_issue = issue
while True:
next_issue_first_page = None
_issue = _issue.next
if not _issue:
break
next_issue_first_page = _issue.first_page
if next_issue_first_page:
break
page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
page_head_heading = "%s, %s, %s" % (title.display_name, label(issue), label(page))
page_head_subheading = label(title)
crumbs = create_crumbs(title, issue, date, edition, page)
filename = page.jp2_abs_filename
if filename:
try:
im = os.path.getsize(filename)
image_size = filesizeformat(im)
except OSError:
image_size = "Unknown"
image_credit = issue.batch.awardee.name
host = request.get_host()
static_url = settings.STATIC_URL
template = "page.html"
response = render(request, template, locals())
return response
@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles(request, start=None, page_number=1):
page_title = 'Newspaper Titles'
if start:
page_title += ' Starting With %s' % start
titles = models.Title.objects.order_by('name_normal')
titles = titles.filter(name_normal__istartswith=start.upper())
else:
titles = models.Title.objects.all().order_by('name_normal')
paginator = Paginator(titles, 50)
try:
page = paginator.page(page_number)
except InvalidPage:
page = paginator.page(1)
page_start = page.start_index()
page_end = page.end_index()
page_range_short = list(_page_range_short(paginator, page))
browse_val = [chr(n) for n in range(65, 91)]
browse_val.extend([str(i) for i in range(10)])
collapse_search_tab = True
crumbs = list(settings.BASE_CRUMBS)
return render(request, 'titles.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title(request, lccn):
title = get_object_or_404(models.Title, lccn=lccn)
page_title = label(title)
page_name = "title"
# we call these here, because they query the db, they are not
# cached by django's ORM, and we have some conditional logic
# in the template that would result in them getting called more
# than once. Short story: minimize database hits...
related_titles = title.related_titles()
succeeding_titles = title.succeeding_titles()
preceeding_titles = title.preceeding_titles()
notes = []
has_external_link = False
for note in title.notes.all():
org_text = html.escape(note.text)
text = re.sub(r'(http(s)?://[^\s]+[^\.])',
r'<a class="external" href="\1">\1</a>', org_text)
if text != org_text:
has_external_link = True
notes.append(text)
if title.has_issues:
rep_notes = title.first_issue.notes.filter(type="noteAboutReproduction")
num_notes = rep_notes.count()
if num_notes >= 1:
explanation = rep_notes[0].text
first_issue = title.first_issue
if first_issue:
issue_date = first_issue.date_issued
# add essay info on this page from either the database or from a template
first_essay = title.first_essay
essay_template = os.path.join(settings.ESSAY_TEMPLATES, title.lccn+".html")
crumbs = create_crumbs(title)
response = render(request, 'title.html', locals())
return response
@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles_in_city(request, state, county, city,
page_number=1, order='name_normal'):
state, county, city = list(map(unpack_url_path, (state, county, city)))
page_title = "Titles in City: %s, %s" % (city, state)
titles = models.Title.objects.all()
if city:
titles = titles.filter(places__city__iexact=city)
if county:
titles = titles.filter(places__county__iexact=county)
if state:
titles = titles.filter(places__state__iexact=state)
titles = titles.order_by(order)
titles = titles.distinct()
if titles.count() == 0:
raise Http404
paginator = Paginator(titles, 50)
try:
page = paginator.page(page_number)
except InvalidPage:
page = paginator.page(1)
page_range_short = list(_page_range_short(paginator, page))
return render(request, 'reports/city.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles_in_county(request, state, county,
page_number=1, order='name_normal'):
state, county = list(map(unpack_url_path, (state, county)))
page_title = "Titles in County: %s, %s" % (county, state)
titles = models.Title.objects.all()
if county:
titles = titles.filter(places__county__iexact=county)
if state:
titles = titles.filter(places__state__iexact=state)
titles = titles.order_by(order)
titles = titles.distinct()
if titles.count() == 0:
raise Http404
paginator = Paginator(titles, 50)
try:
page = paginator.page(page_number)
except InvalidPage:
page = paginator.page(1)
page_range_short = list(_page_range_short(paginator, page))
return render(request, 'reports/county.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles_in_state(request, state, page_number=1, order='name_normal'):
state = unpack_url_path(state)
page_title = "Titles in State: %s" % state
titles = models.Title.objects.all()
if state:
titles = titles.filter(places__state__iexact=state)
titles = titles.order_by(order)
titles = titles.distinct()
if titles.count() == 0:
raise Http404
paginator = Paginator(titles, 50)
try:
page = paginator.page(page_number)
except InvalidPage:
page = paginator.page(1)
page_range_short = list(_page_range_short(paginator, page))
return render(request, 'reports/state.html', locals())
# TODO: this redirect can go away some suitable time after 08/2010
# it predates having explicit essay ids
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_essays(request, lccn):
title = get_object_or_404(models.Title, lccn=lccn)
# if there's at least one essay, redirect to the first one
if len(title.essays.all()) >= 1:
url = title.essays.all()[0].url
return HttpResponsePermanentRedirect(url)
else:
return HttpResponseNotFound()
def _create_year_form(issues, year, all_issues):
if issues.count() > 0:
if year is None:
_year = issues[0].date_issued.year
else:
_year = int(year)
else:
_year = 1900 # no issues available
year_view = HTMLCalendar(firstweekday=6, issues=issues, all_issues=all_issues).formatyear(_year)
dates = issues.dates('date_issued', 'year')
class SelectYearForm(django_forms.Form):
year = fields.ChoiceField(choices=((d.year, d.year) for d in dates), initial=_year)
year.widget.attrs["class"] = "form-select w-auto d-inline-block"
return year_view, SelectYearForm()
def _search_engine_words(request):
"""
Inspects the http request and returns a list of words from the OCR
text relevant to a particular search engine query. If the
request didn't come via a search engine result an empty list is
returned.
"""
# get the refering url
referer = request.META.get('HTTP_REFERER')
if not referer:
return []
uri = urllib.parse.urlparse(referer)
qs = urllib.parse.parse_qs(uri.query)
# extract a potential search query from refering url
if 'q' in qs:
words = qs['q'][0]
elif 'p' in qs:
words = qs['p'][0]
else:
return []
# ask solr for the pre-analysis words that could potentially
# match on the page. For example if we feed in 'buildings' we could get
# ['building', 'buildings', 'BUILDING', 'Buildings'] depending
# on the actual OCR for the page id that is passed in
words = words.split(' ')
words = solr_index.word_matches_for_page(request.path, words)
return words
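# Illustration (hypothetical referer; added for clarity): a request arriving
# with HTTP_REFERER "https://www.google.com/search?q=capitol+buildings"
# yields qs == {'q': ['capitol buildings']}, so ['capitol', 'buildings'] is
# what gets passed to solr_index.word_matches_for_page for this page.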
@cache_page(settings.DEFAULT_TTL_SECONDS)
def page_ocr(request, lccn, date, edition, sequence):
title, issue, page = _get_tip(lccn, date, edition, sequence)
page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
crumbs = create_crumbs(title, issue, date, edition, page)
host = request.get_host()
return render(request, 'page_text.html', locals())
def page_pdf(request, lccn, date, edition, sequence):
title, issue, page = _get_tip(lccn, date, edition, sequence)
return _stream_file(page.pdf_abs_filename, 'application/pdf')
def page_jp2(request, lccn, date, edition, sequence):
title, issue, page = _get_tip(lccn, date, edition, sequence)
return _stream_file(page.jp2_abs_filename, 'image/jp2')
def page_ocr_xml(request, lccn, date, edition, sequence):
title, issue, page = _get_tip(lccn, date, edition, sequence)
return _stream_file(page.ocr_abs_filename, 'application/xml')
def page_ocr_txt(request, lccn, date, edition, sequence):
title, issue, page = _get_tip(lccn, date, edition, sequence)
try:
text = page.ocr.text
return HttpResponse(text, content_type='text/plain')
except models.OCR.DoesNotExist:
raise Http404("No OCR for %s" % page)
@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def page_rdf(request, lccn, date, edition, sequence):
page = get_page(lccn, date, edition, sequence)
graph = page_to_graph(page)
response = HttpResponse(graph.serialize(base=_rdf_base(request),
include_base=True),
content_type='application/rdf+xml')
return response
@cache_page(settings.DEFAULT_TTL_SECONDS)
def page_print(request, lccn, date, edition, sequence,
width, height, x1, y1, x2, y2):
page = get_page(lccn, date, edition, sequence)
title = get_object_or_404(models.Title, lccn=lccn)
issue = page.issue
page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
crumbs = create_crumbs(title, issue, date, edition, page)
host = request.get_host()
image_credit = page.issue.batch.awardee.name
path_parts = dict(lccn=lccn, date=date, edition=edition,
sequence=sequence,
width=width, height=height,
x1=x1, y1=y1, x2=x2, y2=y2)
url = urls.reverse('openoni_page_print',
kwargs=path_parts)
return render(request, 'page_print.html', locals())
@cache_page(settings.DEFAULT_TTL_SECONDS)
def issues_first_pages(request, lccn, page_number=1):
title = get_object_or_404(models.Title, lccn=lccn)
issues = title.issues.all()
if not issues.count() > 0:
raise Http404("No issues for %s" % title.display_name)
first_pages = []
for issue in issues:
# include both issue and page because in some cases
# an issue exists which has no associated pages
first_pages.append({'issue': issue, 'page': issue.first_page})
paginator = Paginator(first_pages, 20)
try:
page = paginator.page(page_number)
except InvalidPage:
page = paginator.page(1)
page_range_short = list(_page_range_short(paginator, page))
# set page number variables
if page.has_previous():
previous_page_number = int(page_number) - 1
if page.has_next():
next_page_number = int(page_number) + 1
page_title = 'Browse Issues: %s' % label(title)
page_head_heading = "Browse Issues: %s" % title.display_name
page_head_subheading = label(title)
crumbs = create_crumbs(title)
return render(request, 'issue_pages.html', locals())
|
open-oni/open-oni
|
core/views/browse.py
|
browse.py
|
py
| 19,948 |
python
|
en
|
code
| 43 |
github-code
|
6
|
[
{
"api_name": "core.models.Issue.objects.all",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "core.models.Issue",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.BASE_CRUMBS",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.label",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.label",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "core.rdf.title_to_graph",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._rdf_base",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "core.decorator.rdf_view",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "django.http.Http404",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._page_range_short",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "core.rdf.issue_to_graph",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._rdf_base",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "core.decorator.rdf_view",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "django.urls",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.urls",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.label",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "django.template.defaultfilters.filesizeformat",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.STATIC_URL",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.vary.vary_on_headers",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "core.models.Title.objects.order_by",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "core.models.Title.objects.all",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._page_range_short",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.BASE_CRUMBS",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.label",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "django.utils.html.escape",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "django.utils.html",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.ESSAY_TEMPLATES",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "core.utils.url.unpack_url_path",
"line_number": 330,
"usage_type": "argument"
},
{
"api_name": "core.models.Title.objects.all",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._page_range_short",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "core.utils.url.unpack_url_path",
"line_number": 358,
"usage_type": "argument"
},
{
"api_name": "core.models.Title.objects.all",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._page_range_short",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "core.utils.url.unpack_url_path",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "core.models.Title.objects.all",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._page_range_short",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponsePermanentRedirect",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseNotFound",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.HTMLCalendar",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "django.forms.Form",
"line_number": 428,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 428,
"usage_type": "name"
},
{
"api_name": "django.forms.fields.ChoiceField",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "django.forms.fields",
"line_number": 429,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse.urlparse",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 447,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse.parse_qs",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "core.solr_index.word_matches_for_page",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "core.solr_index",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 467,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._stream_file",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._stream_file",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._stream_file",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._get_tip",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "core.models.OCR",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 496,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.get_page",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "core.rdf.page_to_graph",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "core.utils.utils._rdf_base",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 500,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "core.decorator.rdf_view",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.get_page",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 515,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 515,
"usage_type": "name"
},
{
"api_name": "core.utils.utils.label",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "django.urls",
"line_number": 525,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 511,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 511,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "core.models.Title",
"line_number": 533,
"usage_type": "attribute"
},
{
"api_name": "core.models",
"line_number": 533,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.InvalidPage",
"line_number": 547,
"usage_type": "name"
},
{
"api_name": "core.utils.utils._page_range_short",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.label",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "core.utils.utils.create_crumbs",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "core.decorator.cache_page",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEFAULT_TTL_SECONDS",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 531,
"usage_type": "name"
}
] |
27390985743
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath("."))
import swiftzoom
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'SWIFTzoom'
copyright = '2023, Edoardo Altamura'
author = 'Edoardo Altamura'
release = swiftzoom.__version__
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"recommonmark",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
source_suffix = [".rst", ".md"]
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation": False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for automatic API doc
autodoc_member_order = "bysource"
autodoc_default_flags = ["members"]
autosummary_generate = True
# must be outside run_apidoc definition to be set successfully:
os.environ["SPHINX_APIDOC_OPTIONS"] = "members,undoc-members,show-inheritance"
def run_apidoc(_):
try:
from sphinx.ext.apidoc import main
except ImportError:
from sphinx.apidoc import main
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
cur_dir = os.path.abspath(os.path.dirname(__file__))
api_doc_dir = os.path.join(cur_dir, "modules")
module = os.path.join(cur_dir, "../..", "swiftzoom")
ignore = [
os.path.join(cur_dir, "../..", "tests"),
os.path.join(cur_dir, "../..", "swiftzoom/metadata"),
]
main(["-M", "-f", "-e", "-T", "-d 0", "-o", api_doc_dir, module, *ignore])
def setup(app):
app.connect("builder-inited", run_apidoc)
|
edoaltamura/swiftzoom
|
docs/source/conf.py
|
conf.py
|
py
| 3,219 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "swiftzoom.__version__",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "sphinx.apidoc.main",
"line_number": 96,
"usage_type": "call"
}
] |
36917846701
|
import logging
import os
import sys
from queue import Empty
from threading import Thread
import argparse
import jsonpickle
from polarity_server import globals
from polarity_server.rest import RestApi
class App:
thread_run = True
@classmethod
def run(cls):
parser = argparse.ArgumentParser()
parser.add_argument("--port", "-p", required=False, type=int,
default=5000, help="Port for REST API to listen on")
parser.add_argument("--input", "-i", required=False, type=str,
help="File to preload sessions from")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(os.EX_SOFTWARE)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.input:
if os.path.isfile(args.input):
with open(args.input) as file:
data = file.read()
globals.sessions = jsonpickle.decode(data)
else:
logging.error("Invalid input file specified")
sys.exit(os.EX_SOFTWARE)
RestApi.start_server(args.port)
thread = Thread(target=cls.runner)
thread.start()
command = ""
while command != "quit":
command = input("Command (\"help\" for options): ")
if command == "help":
cls.print_usage()
elif "sessions" in command:
print("")
if not globals.sessions:
print("No active sessions")
else:
if len(command.split()) == 1:
for i, ip_address in enumerate(globals.sessions):
print("{} - {}".format(str(i), ip_address))
else:
session_idx = command.split()[1].strip()
for i, ip_address in enumerate(globals.sessions):
if str(i) == session_idx:
for session in globals.sessions[ip_address]:
print("{} - {}".
format(session.username, ip_address))
print("")
elif "interact" in command:
if len(command.split()) > 2:
session_idx = command.split()[1].strip()
username = command.split()[2].strip()
session = cls.find_session(session_idx, username)
if session:
if not session.shell.is_alive():
session.shell.create_connection()
session.shell.interact()
else:
print("\nNo session found for specified id and/or username\n")
else:
print("\nSession id and username not specified\n")
elif "save" in command:
if len(command.split()) > 1:
filename = command.split()[1].strip()
with open(filename, "w") as file:
file.write(jsonpickle.encode(globals.sessions))
else:
print("\nFilename not specified\n")
elif command != "quit":
print("\nInvalid command\n")
RestApi.stop_server()
cls.thread_run = False
thread.join()
if globals.sessions:
for ip_address in globals.sessions:
for session in globals.sessions[ip_address]:
session.shell.close_connection()
sys.exit(os.EX_OK)
@staticmethod
def print_usage():
print("""
Usage:
help: print this message
quit: exit the program
sessions: print the active session hosts
sessions <id>: print the usernames for the given session host
interact <session id> <username>: interact with host session
save <filename>: save current state to file
""")
@staticmethod
def find_session(idx, username):
for i, ip_address in enumerate(globals.sessions):
if str(i) == idx:
for session in globals.sessions[ip_address]:
if session.username == username:
return session
return None
@classmethod
def runner(cls):
while cls.thread_run:
task = cls.get_task()
if task:
sessions = task.execute()
if sessions:
globals.sessions.update(sessions)
@staticmethod
def get_task():
try:
return globals.task_queue.get(timeout=1.0)
except Empty:
return None
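# Note: the 1 second timeout above keeps runner() responsive; whenever the
# queue is empty, control returns so the loop can re-check cls.thread_run
# and the thread can exit promptly on shutdown.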
|
willmfftt/polarityserver
|
polarity_server/app/app.py
|
app.py
|
py
| 4,759 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.EX_SOFTWARE",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "jsonpickle.decode",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.EX_SOFTWARE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.rest.RestApi.start_server",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "polarity_server.rest.RestApi",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "jsonpickle.encode",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "polarity_server.rest.RestApi.stop_server",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "polarity_server.rest.RestApi",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.EX_OK",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.sessions.update",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "polarity_server.globals.sessions",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "polarity_server.globals.task_queue.get",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "polarity_server.globals.task_queue",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "polarity_server.globals",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "queue.Empty",
"line_number": 141,
"usage_type": "name"
}
] |
36818410851
|
import curses
import curses.ascii
from curses.textpad import Textbox
class MyTextPad(Textbox):
ignored_keys = {
curses.KEY_PPAGE, # Page Up
curses.KEY_NPAGE, # Page Down
}
def __init__(self, win, default):
super().__init__(win)
self.default = default
self.line = default
self._pos = len(default)
self.refresh()
@property
def pos(self):
return self._pos
@pos.setter
def pos(self, val):
self._pos = val
if self._pos < 0:
self._pos = 0
if self._pos > len(self.line):
self._pos = len(self.line)
@property
def cursor_pos(self):
y = self.pos // self.maxx
x = self.pos % self.maxx
return y, x
def refresh(self):
self.win.clear()
for y in range(self.maxy):
self.win.addstr(y, 0, self.line[self.maxx * y:self.maxx * (y + 1)])
self.win.move(*self.cursor_pos)
self.win.refresh()
def do_command(self, ch):
ordch = ord(ch) if isinstance(ch, str) else ch
ch = chr(ch) if isinstance(ch, int) else ch
        if curses.KEY_BACKSPACE == ordch:
            if self.pos > 0:
                self.line = self.line[:self.pos - 1] + self.line[self.pos:]
                self.pos -= 1
elif curses.KEY_LEFT == ordch:
self.pos -= 1
elif curses.KEY_RIGHT == ordch:
self.pos += 1
elif curses.KEY_DOWN == ordch:
self.pos += self.maxx
elif curses.KEY_DC == ordch:
self.line = self.line[:self.pos] + self.line[self.pos + 1:]
elif curses.KEY_UP == ordch:
self.pos -= self.maxx
elif curses.KEY_HOME == ordch:
self.pos = 0
elif curses.KEY_END == ordch:
self.pos = len(self.line)
elif ordch in self.ignored_keys:
pass
elif '\n' == ch:
return 0
elif 27 == ordch:
return -1
elif ch.isprintable():
self.line = self.line[:self.pos] + ch + self.line[self.pos:]
self.pos += 1
return True
def gather(self):
return self.line.strip()
def edit(self, validate=None):
while 1:
ch = self.win.get_wch()
if validate:
ch = validate(ch)
if not ch:
continue
code = self.do_command(ch)
if code == -1:
return self.default
if not code:
break
self.refresh()
return self.gather()
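# Editor's addition: a minimal usage sketch -- edit a prefilled line in a small
# window; Enter accepts the edit, Esc restores the default string.
if __name__ == "__main__":
    def _demo(stdscr):
        curses.curs_set(1)
        win = curses.newwin(3, 40, 1, 1)
        return MyTextPad(win, "hello world").edit()

    print(curses.wrapper(_demo))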
|
AzaubaevViktor/tagging
|
console/my_textpad.py
|
my_textpad.py
|
py
| 2,571 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "curses.textpad.Textbox",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "curses.KEY_PPAGE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_NPAGE",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_BACKSPACE",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_LEFT",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_RIGHT",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_DOWN",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_DC",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_UP",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_HOME",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "curses.KEY_END",
"line_number": 65,
"usage_type": "attribute"
}
] |
28176425279
|
import sqlite3
import json
from datetime import datetime
from traceback import print_tb
from helpers import create_table_if_not_exists, get_db_path, get_timeframe_path, format_data, \
acceptable, get_timeframes
timeframes = get_timeframes()
sql_transaction = []
start_row = 0
# start_row = 8400000 # that is where I stopped it last time
print(timeframes)
def find_parent(pid):
try:
sql = "SELECT comment FROM parent_reply WHERE comment_id = '{}' LIMIT 1".format(pid)
c.execute(sql)
result = c.fetchone()
if result is not None:
res = result[0]
if res is None or res == 'False' or res == '0':
return False
return res
else:
return False
except Exception as e:
print('find_parent', e)
print_tb(e)
return False
def find_existing_score(pid):
if pid is False:
return False
try:
sql = "SELECT score FROM parent_reply WHERE parent_id = '{}' LIMIT 1".format(pid)
c.execute(sql)
result = c.fetchone()
if result is not None:
return result[0]
else:
return False
except Exception as e:
print('find_existing_score', e)
print_tb(e)
return False
def transaction_bldr(sql, bindings = None):
global sql_transaction
sql_transaction.append([sql, bindings])
if len(sql_transaction) > 2000:
c.execute('BEGIN TRANSACTION')
for s, b in sql_transaction:
try:
if b is not None:
c.execute(s, b)
else:
c.execute(s)
# except Exception as e:
# print(str(datetime.now()), s, e)
            except Exception:
                pass
connection.commit()
sql_transaction = []
def sql_insert_replace_comment(commentid, parentid, parent, comment, subreddit, time, score):
try:
sql = """UPDATE parent_reply
SET parent_id = ?,
comment_id = ?,
parent = ?,
comment = ?,
subreddit = ?,
unix = ?,
score = ?
WHERE parent_id = ?;"""
b = [parentid, commentid, parent, comment, subreddit, int(time), score, parentid]
transaction_bldr(sql, b)
except Exception as e:
print('sql_insert_replace_comment', e)
print_tb(e)
def sql_insert_has_parent(commentid, parentid, parent, comment, subreddit, time, score):
try:
sql = """INSERT INTO parent_reply (parent_id, comment_id, parent, comment, subreddit, unix, score)
VALUES (?, ?, ?, ?, ?, ?, ?);
"""
b = [parentid, commentid, parent, comment, subreddit, int(time), score]
transaction_bldr(sql, b)
except Exception as e:
print('sql_insert_has_parent', e)
print_tb(e)
def sql_insert_no_parent(commentid, parentid, comment, subreddit, time, score):
try:
sql = """INSERT INTO parent_reply (parent_id, comment_id, comment, subreddit, unix, score)
VALUES (?, ?, ?, ?, ?, ?);"""
b = [parentid, commentid, comment, subreddit, int(time), score]
transaction_bldr(sql, b)
except Exception as e:
print('sql_insert_no_parent', e)
print_tb(e)
for timeframe in timeframes:
with sqlite3.connect(get_db_path(timeframe)) as connection:
c = connection.cursor()
create_table_if_not_exists(c)
row_counter = 0
paired_rows = 0
# with open(get_timeframe_path(timeframe), buffering=1000) as f:
with open(get_timeframe_path(timeframe)) as f:
for row in f:
row_counter += 1
if row_counter >= start_row:
try:
row = json.loads(row)
parent_id = row['parent_id'].split('_')[1]
body = format_data(row['body'])
created_utc = row['created_utc']
score = row['score']
comment_id = row['id']
subreddit = row['subreddit']
parent_data = find_parent(parent_id)
existing_comment_score = find_existing_score(parent_id)
if existing_comment_score:
if score > existing_comment_score:
if acceptable(body):
sql_insert_replace_comment(comment_id, parent_id, parent_data, body, subreddit,
created_utc, score)
else:
if acceptable(body):
if parent_data:
if score >= 2:
sql_insert_has_parent(comment_id, parent_id, parent_data, body, subreddit,
created_utc, score)
paired_rows += 1
else:
sql_insert_no_parent(comment_id, parent_id, body, subreddit, created_utc, score)
except Exception as e:
print(e)
if row_counter % 100000 == 0:
print('Total Rows Read: {}, Paired Rows: {}, Time: {}'.format(row_counter, paired_rows,
str(datetime.now())))
# start from 0
start_row = 0
print('Done')
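# Editor's addition: a minimal read-back sketch of the parent/reply pairs
# produced above; the column names follow the inserts above, and the LIMIT
# and score filter are illustrative only.
with sqlite3.connect(get_db_path(timeframes[0])) as conn:
    cur = conn.cursor()
    cur.execute("SELECT parent, comment FROM parent_reply "
                "WHERE parent IS NOT NULL AND score >= 2 LIMIT 5")
    for parent, comment in cur.fetchall():
        print(parent, "->", comment)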
|
DuncteBot/chatbot
|
data_parser.py
|
data_parser.py
|
py
| 5,643 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "helpers.get_timeframes",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "helpers.get_db_path",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "helpers.create_table_if_not_exists",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "helpers.get_timeframe_path",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "helpers.format_data",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "helpers.acceptable",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "helpers.acceptable",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 166,
"usage_type": "name"
}
] |
33952361000
|
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
help="# of skip frames between detections")
args = vars(ap.parse_args())
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
if not args.get("input", False):
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
else:
print("[INFO] opening video file...")
vs = cv2.VideoCapture(args["input"])
writer = None
W = None
H = None
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
totalFrames = 0
totalDown = 0
totalUp = 0
fps = FPS().start()
while True:
frame = vs.read()
frame = frame[1] if args.get("input", False) else frame
if args["input"] is not None and frame is None:
break
frame = imutils.resize(frame, width=500)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if W is None or H is None:
(H, W) = frame.shape[:2]
if args["output"] is not None and writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 30,
(W, H), True)
status = "Waiting"
rects = []
if totalFrames % args["skip_frames"] == 0:
status = "Detecting"
trackers = []
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
net.setInput(blob)
detections = net.forward()
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > args["confidence"]:
idx = int(detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(startX, startY, endX, endY)
tracker.start_track(rgb, rect)
trackers.append(tracker)
else:
for tracker in trackers:
status = "Tracking"
tracker.update(rgb)
pos = tracker.get_position()
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
rects.append((startX, startY, endX, endY))
cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
objects = ct.update(rects)
for (objectID, centroid) in objects.items():
to = trackableObjects.get(objectID, None)
if to is None:
to = TrackableObject(objectID, centroid)
else:
y = [c[1] for c in to.centroids]
direction = centroid[1] - np.mean(y)
to.centroids.append(centroid)
if not to.counted:
if direction < 0 and centroid[1] < H // 2:
totalUp += 1
to.counted = True
elif direction > 0 and centroid[1] > H // 2:
totalDown += 1
to.counted = True
trackableObjects[objectID] = to
text = "ID {}".format(objectID)
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
info = [
("Up", totalUp),
("Down", totalDown),
("Status", status),
]
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.putText(frame, "Crowd Monitor - Store Entry", (109,26),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
if writer is not None:
writer.write(frame)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
totalFrames += 1
fps.update()
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
if writer is not None:
writer.release()
if not args.get("input", False):
vs.stop()
else:
vs.release()
cv2.destroyAllWindows()
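# Editor's addition: a tiny worked example of the direction test used above --
# a rising y-coordinate (positive direction) means the object moves down the
# frame; counting additionally requires crossing the center line at H // 2.
_ys = [120, 130, 140]                  # previous centroid y-coordinates
_direction = 155 - np.mean(_ys)        # 155 - 130 = 25.0 > 0 -> moving "down"
assert _direction > 0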
|
Nem3sisX/piedpiper-socialspace
|
Inside_Store_Model/run.py
|
run.py
|
py
| 4,748 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "imutils.video.VideoStream",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pyimagesearch.centroidtracker.CentroidTracker",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "imutils.video.FPS",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "imutils.resize",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "dlib.correlation_tracker",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "dlib.rectangle",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pyimagesearch.trackableobject.TrackableObject",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 191,
"usage_type": "call"
}
] |
14822509390
|
from sqlalchemy.orm import Session
from database_models import Task, TaskStatuses
from schemas import CreateTaskModel, UpdateTaskModel, DeleteTaskModel
from datetime import datetime
def create_task(db:Session, task: CreateTaskModel):
db_task = Task(
name = task.name,
description = task.description,
status = TaskStatuses.opened,
create_date = datetime.now()
)
db.add(db_task)
db.commit()
db.refresh(db_task)
return db_task
def update_task(db:Session, updated_task: UpdateTaskModel):
filtered_task = {k:v for k,v in updated_task.to_dict().items() if v is not None}
db.query(Task).filter(Task.id==updated_task.id).update(filtered_task)
db.commit()
return updated_task
def delete_task(db:Session, delete_task: DeleteTaskModel):
db.query(Task).filter(Task.id==delete_task.id).delete(synchronize_session=False)
db.commit()
return delete_task
def get_all_tasks(db:Session):
return db.query(Task).all()
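# Editor's addition: a commented usage sketch; the engine URL and the
# CreateTaskModel fields are assumptions, not part of this module.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine("sqlite:///tasks.db")
#     db = sessionmaker(bind=engine)()
#     create_task(db, CreateTaskModel(name="demo", description="try out crud"))
#     print(get_all_tasks(db))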
|
maximzec/ToDoApp
|
crud.py
|
crud.py
|
py
| 993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "schemas.CreateTaskModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "database_models.Task",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "database_models.TaskStatuses.opened",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "database_models.TaskStatuses",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "schemas.UpdateTaskModel",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "database_models.Task",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "database_models.Task.id",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "schemas.DeleteTaskModel",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "database_models.Task",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "database_models.Task.id",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "database_models.Task",
"line_number": 33,
"usage_type": "argument"
}
] |
37018468843
|
import numpy as np
# randomly sampling N = 1000 observations from a t-distribution
N = 1000
df = N-1
X = np.random.standard_t(df, size = N)
import matplotlib.pyplot as plt
from scipy.stats import t
x_values = np.arange(-5,5,0.1)
y_values = t.pdf(x_values,df)
# Sample Distribution
count, bins, ignored = plt.hist(X, 20, density = True,color = 'purple',label = 'Sample Distribution')
# Population Distribution
plt.plot(x_values,y_values, color = 'y', linewidth = 2.5,label = 'Population Distribution')
#adding title and y-label
plt.title("Randomly sampled from standard Student t-distribution")
plt.ylabel("Probability")
plt.legend()
plt.show()
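# Editor's addition: a quick sanity check -- for df degrees of freedom the
# theoretical variance of Student's t is df / (df - 2), which the sample
# variance should approximate for large N.
print("sample var:", X.var(), "theoretical var:", df / (df - 2))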
|
TatevKaren/mathematics-statistics-for-data-science
|
Probability-Distribution-Functions/Student t distribution.py
|
Student t distribution.py
|
py
| 634 |
python
|
en
|
code
| 88 |
github-code
|
6
|
[
{
"api_name": "numpy.random.standard_t",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scipy.stats.t.pdf",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scipy.stats.t",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
}
] |
14764098844
|
import sys
from typing import List, Tuple
def _get_element_orders(arr: List[int],
key: int) -> Tuple[List[int], List[int], int]:
"""
return two lists - one of figures less than
key and one of those greater - and the count
of key in arr
"""
less, greater, equal = [], [], 0
for i in arr:
if i < key:
less.append(i)
elif i > key:
greater.append(i)
else:
equal += 1
return less, greater, equal
def _get_majority_element(arr: List[int],
                          majority_size: float) -> int:
"""
    return the value of the majority element of arr
    if this element exists, and return
-1 if no such element exists
"""
less_than, greater_than, equal = _get_element_orders(arr, arr[0])
if equal > majority_size:
return arr[0]
elif len(less_than) > majority_size:
return _get_majority_element(less_than, majority_size)
elif len(greater_than) > majority_size:
return _get_majority_element(greater_than, majority_size)
else:
return -1
def get_majority_element(arr: List[int]) -> int:
"""
wrapper function for _get_majority_element which
calculates the majority size
"""
majority_size = len(arr) / 2
return _get_majority_element(arr, majority_size)
if __name__ == '__main__':
input_data = sys.stdin.read()
n, *a = list(map(int, input_data.split()))
if get_majority_element(a) != -1:
print(1)
else:
print(0)
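# Editor's addition: worked examples (shown as comments, since running the
# script reads the input from stdin).
#
#     get_majority_element([2, 3, 9, 2, 2])  # -> 2, appears 3 > 5/2 times
#     get_majority_element([1, 2, 3, 4])     # -> -1, no element appears > 2 times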
|
JoeLove100/data-structures-and-algorithms
|
divide_and_conquer/majority_element.py
|
majority_element.py
|
py
| 1,557 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "sys.stdin.read",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 57,
"usage_type": "attribute"
}
] |
2857680026
|
import numpy as np
import torch
from functools import reduce # Required in Python 3
import operator
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def multi_index_to_single(tensor, index):
    # flatten a multi-dimensional index tuple into a single flat index
    return torch.stack([index[i] * prod([tensor.shape[j] for j in range(i + 1, tensor.ndim)]) + index[i + 1] for i in range(len(index) - 1)]).sum(0)
def add_at(tensor_a, index, tensor_b):
    # scatter-add tensor_b into tensor_a at the (possibly repeated) positions
    # given by index, mirroring np.add.at; index_add_ works on the flattened view
    index_flat = multi_index_to_single(tensor_a, index)
    return tensor_a.flatten().index_add_(0, index_flat, tensor_b.flatten()).reshape(tensor_a.shape)
# build a large random test case (magnitudes taken from the real pipeline)
gvi = np.zeros([2300, 2300], dtype=complex)
visg = 141988016 * np.random.randn(5982336) + 4067017 + 22521834j * np.random.randn(5982336) + 48913j
undxi = np.random.randint(0, 2300, size=(len(visg)))
vndxi = np.random.randint(0, 2300, size=(len(visg)))
gvi_t = torch.from_numpy(gvi)
np.add.at(gvi, (undxi, vndxi), visg)
visg_t = torch.from_numpy(visg)
undxi = torch.from_numpy(undxi)
vndxi = torch.from_numpy(vndxi)
gvi_tr = gvi_t.real
gvi_ti = gvi_t.imag
visg_tr = visg_t.real
visg_ti = visg_t.imag
add_at(gvi_tr, (undxi, vndxi), visg_tr)
add_at(gvi_ti, (undxi, vndxi), visg_ti)
gvi_t = torch.view_as_complex(torch.stack([gvi_tr, gvi_ti], dim=-1))
assert np.isclose(gvi_t, gvi).all()
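# Editor's addition: the same equivalence on a tiny example with a repeated
# index, which is exactly the case a plain indexed assignment would get wrong.
a = np.zeros((3, 3))
idx = (np.array([0, 0, 2]), np.array([1, 1, 2]))
vals = np.array([1.0, 2.0, 3.0])
np.add.at(a, idx, vals)                                   # a[0, 1] == 3.0
t = torch.zeros(3, 3, dtype=torch.float64)
out = add_at(t, tuple(torch.from_numpy(i) for i in idx), torch.from_numpy(vals))
assert np.allclose(out.numpy(), a)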
|
DavidRuhe/interferometry
|
src/gridding_python_improved/add_at.py
|
add_at.py
|
py
| 1,989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.reduce",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "operator.mul",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.stack",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.add.at",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.view_as_complex",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 59,
"usage_type": "call"
}
] |
37430539138
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: shopnum1 GuidBuyList.aspx SQL injection
referer: http://www.wooyun.org/bugs/wooyun-2015-0118447
author: Lucifer
description: The guid parameter of GuidBuyList.aspx is vulnerable to SQL injection.
'''
import sys
import requests
class shopnum_GuidBuyList_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/GuidBuyList.aspx?guid=97dcbadc-9b4f-4ff5-9ffb-17e46e10d66d%27AnD(ChAr(66)%2BChAr(66)%2BChAr(66)%2B@@VeRsiOn)%3E0--"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"BBBMicrosoft" in req.text:
return "[+]存在shopnum1 GuidBuyList.aspx SQL注入漏洞...(高危)\tpayload: "+vulnurl
        except Exception:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = shopnum_GuidBuyList_sqli_BaseVerify(sys.argv[1])
testVuln.run()
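# Editor's addition: requests is called with verify=False above, so each run
# emits an InsecureRequestWarning; the standard (optional) suppression is:
#
#     import urllib3
#     urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)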
|
iceyhexman/onlinetools
|
scanner/plugins/cms/shopnum/shopnum_GuidBuyList_sqli.py
|
shopnum_GuidBuyList_sqli.py
|
py
| 1,147 |
python
|
en
|
code
| 1,626 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 34,
"usage_type": "attribute"
}
] |
22460490121
|
import os
import sys
import argparse
import time
import warnings
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.getcwd().split('cbo-in-python')[0], 'cbo-in-python'))
from src.torch.models import *
from src.datasets import load_mnist_dataloaders
from src.torch import Optimizer, Loss
MODELS = {
'TinyMLP': TinyMLP,
'SmallMLP': SmallMLP,
'LeNet1': LeNet1,
'LeNet5': LeNet5,
}
DATASETS = {
'MNIST': load_mnist_dataloaders,
}
def _evaluate(model, X_, y_, loss_fn):
with torch.no_grad():
outputs = model(X_)
y_pred = torch.argmax(outputs, dim=1)
loss = loss_fn(outputs, y_)
acc = 1. * y_.eq(y_pred).sum().item() / y_.shape[0]
return loss, acc
def train(model, train_dataloader, test_dataloader, device, use_multiprocessing, processes,
epochs, particles, particles_batch_size,
alpha, sigma, l, dt, anisotropic, eps, partial_update, cooling,
eval_freq):
train_accuracies = []
train_losses = []
test_accuracies = []
test_losses = []
optimizer = Optimizer(model, n_particles=particles, alpha=alpha, sigma=sigma,
l=l, dt=dt, anisotropic=anisotropic, eps=eps, partial_update=partial_update,
use_multiprocessing=use_multiprocessing, n_processes=processes,
particles_batch_size=particles_batch_size, device=device)
loss_fn = Loss(F.nll_loss, optimizer)
n_batches = len(train_dataloader)
for epoch in range(epochs):
epoch_train_accuracies = []
epoch_train_losses = []
for batch, (X, y) in enumerate(train_dataloader):
X, y = X.to(device), y.to(device)
train_loss, train_acc = _evaluate(model, X, y, F.nll_loss)
epoch_train_accuracies.append(train_acc)
epoch_train_losses.append(train_loss.cpu())
optimizer.zero_grad()
loss_fn.backward(X, y, backward_gradients=False)
optimizer.step()
if batch % eval_freq == 0 or batch == n_batches - 1:
with torch.no_grad():
losses = []
accuracies = []
for X_test, y_test in test_dataloader:
X_test, y_test = X_test.to(device), y_test.to(device)
loss, acc = _evaluate(model, X_test, y_test, F.nll_loss)
losses.append(loss.cpu())
accuracies.append(acc)
val_loss, val_acc = np.mean(losses), np.mean(accuracies)
if batch == n_batches - 1:
test_accuracies.append(val_acc)
test_losses.append(val_loss)
print(
f'Epoch: {epoch + 1:2}/{epochs}, batch: {batch + 1:4}/{n_batches}, train loss: {train_loss:8.3f}, '
f'train acc: {train_acc:8.3f}, test loss: {val_loss:8.3f}, test acc: {val_acc:8.3f}',
flush=True)
train_accuracies.append(np.mean(epoch_train_accuracies))
train_losses.append(np.mean(epoch_train_losses))
if cooling:
optimizer.cooling_step()
return train_accuracies, test_accuracies, train_losses, test_losses
def build_plot(epochs, model_name, dataset_name, plot_path,
train_acc, test_acc, train_loss, test_loss):
plt.rcParams['figure.figsize'] = (20, 10)
plt.rcParams['font.size'] = 25
epochs_range = np.arange(1, epochs + 1, dtype=int)
plt.clf()
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(epochs_range, train_acc, label='train')
ax1.plot(epochs_range, test_acc, label='test')
ax1.legend()
ax1.set_xlabel('epoch')
ax1.set_ylabel('accuracy')
ax1.set_title('Accuracy')
ax2.plot(epochs_range, train_loss, label='train')
ax2.plot(epochs_range, test_loss, label='test')
ax2.legend()
ax2.set_xlabel('epoch')
ax2.set_ylabel('loss')
ax2.set_title('Loss')
plt.suptitle(f'{model_name} @ {dataset_name}')
plt.savefig(plot_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', type=str, default='SmallMLP', help=f'architecture to use',
choices=list(MODELS.keys()))
parser.add_argument('--dataset', type=str, default='MNIST', help='dataset to use',
choices=list(DATASETS.keys()))
parser.add_argument('--device', type=str, choices=['cuda', 'cpu'], default='cuda',
help='whether to use GPU (cuda) for accelerated computations or not')
parser.add_argument('--use_multiprocessing', action='store_true',
help='specify to use multiprocessing for accelerating computations on CPU '
'(note, it is impossible to use multiprocessing with GPU)')
parser.add_argument('--processes', type=int, default=4,
help='how many processes to use for multiprocessing')
parser.add_argument('--epochs', type=int, default=10, help='train for EPOCHS epochs')
parser.add_argument('--batch_size', type=int, default=60, help='batch size (for samples-level batching)')
parser.add_argument('--particles', type=int, default=100, help='')
parser.add_argument('--particles_batch_size', type=int, default=10, help='batch size '
'(for particles-level batching)')
parser.add_argument('--alpha', type=float, default=50, help='alpha from CBO dynamics')
parser.add_argument('--sigma', type=float, default=0.4 ** 0.5, help='sigma from CBO dynamics')
parser.add_argument('--l', type=float, default=1, help='lambda from CBO dynamics')
parser.add_argument('--dt', type=float, default=0.1, help='dt from CBO dynamics')
    parser.add_argument('--anisotropic', type=lambda s: s.lower() == 'true', default=True,
                        help='whether to use anisotropic noise or not (argparse cannot parse bool directly)')
    parser.add_argument('--eps', type=float, default=1e-5, help='threshold for additional random shift')
    parser.add_argument('--partial_update', type=lambda s: s.lower() == 'true', default=True,
                        help='whether to use partial or full update')
    parser.add_argument('--cooling', type=lambda s: s.lower() == 'true', default=False,
                        help='whether to apply cooling strategy')
parser.add_argument('--build_plot', required=False, action='store_true',
help='specify to build loss and accuracy plot')
parser.add_argument('--plot_path', required=False, type=str, default='demo.png',
help='path to save the resulting plot')
parser.add_argument('--eval_freq', type=int, default=100, help='evaluate test accuracy every EVAL_FREQ '
'samples-level batches')
args = parser.parse_args()
warnings.filterwarnings('ignore')
model = MODELS[args.model]()
train_dataloader, test_dataloader = DATASETS[args.dataset](train_batch_size=args.batch_size,
test_batch_size=args.batch_size)
device = args.device
if args.device == 'cuda' and not torch.cuda.is_available():
print('Cuda is unavailable. Using CPU instead.')
device = 'cpu'
use_multiprocessing = args.use_multiprocessing
if device != 'cpu' and use_multiprocessing:
print('Unable to use multiprocessing on GPU')
use_multiprocessing = False
device = torch.device(device)
print(f'Training {args.model} @ {args.dataset}')
start_time = time.time()
result = train(model, train_dataloader, test_dataloader, device, use_multiprocessing, args.processes,
args.epochs, args.particles, args.particles_batch_size,
args.alpha, args.sigma, args.l, args.dt, args.anisotropic, args.eps, args.partial_update,
args.cooling,
args.eval_freq)
print(f'Elapsed time: {time.time() - start_time} seconds')
if args.build_plot:
build_plot(args.epochs, args.model, args.dataset, args.plot_path,
*result)
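# Editor's addition: a self-contained numpy sketch of the consensus-based
# optimization (CBO) update that the Optimizer above implements, on a toy
# quadratic; the parameter names mirror the CLI flags (alpha, l, sigma, dt).
def _cbo_toy(alpha=50.0, l=1.0, sigma=0.4 ** 0.5, dt=0.1,
             particles=100, steps=200, dim=2, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.normal(size=(particles, dim)) * 3.0
    for _ in range(steps):
        f = (x ** 2).sum(axis=1)
        w = np.exp(-alpha * (f - f.min()))           # stabilized Gibbs weights
        m = (w[:, None] * x).sum(axis=0) / w.sum()   # consensus point
        noise = rng.normal(size=x.shape)
        # drift toward the consensus plus anisotropic exploration noise
        x = x - l * (x - m) * dt + sigma * (x - m) * noise * np.sqrt(dt)
    return m                                         # close to the minimizer [0, 0]

# print(_cbo_toy())  # uncomment to check convergence without the full demo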
|
Igor-Tukh/cbo-in-python
|
demo/torch_nn_demo.py
|
torch_nn_demo.py
|
py
| 8,219 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "src.datasets.load_mnist_dataloaders",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "src.torch.Optimizer",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "src.torch.Loss",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 186,
"usage_type": "call"
}
] |
7161693364
|
"""
Given an unsorted integer array nums, return the smallest missing positive integer.
You must implement an algorithm that runs in O(n) time and uses constant extra space.
Example 1:
Input: nums = [1,2,0]
Output: 3
Example 2:
Input: nums = [3,4,-1,1]
Output: 2
Example 3:
Input: nums = [7,8,9,11,12]
Output: 1
Constraints:
1 <= nums.length <= 5 * 10^5
-2^31 <= nums[i] <= 2^31 - 1
"""
from typing import List
class Solution:
def remove_negative_nums(self,A):
temp = []
for num in A:
if num > 0:
temp.append(num)
return temp
def firstMissingPositive(self, nums: List[int]) -> int:
n = len(nums)
num_of_negative_int = 0
for num in nums:
if num<=0:
num_of_negative_int += 1
if n == num_of_negative_int:
return 1
else:
temp =[]
temp = self.remove_negative_nums(nums)
min_num = min(temp)
if min_num != 1:
return 1
else:
# temp = sorted(temp)
for i in range(1,len(temp)):
if min_num+1 in temp:
min_num += 1
else:
return min_num + 1
return min_num + 1
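# Editor's addition: the solution above degrades to O(n^2) because `in temp`
# scans a list; a sketch of the standard O(n)-time, O(1)-extra-space
# index-placement approach that actually meets the stated constraint:
def first_missing_positive(nums: List[int]) -> int:
    n = len(nums)
    for i in range(n):
        # swap until the value at i is out of range or already in its slot
        while 1 <= nums[i] <= n and nums[nums[i] - 1] != nums[i]:
            j = nums[i] - 1
            nums[i], nums[j] = nums[j], nums[i]
    for i in range(n):
        if nums[i] != i + 1:
            return i + 1
    return n + 1

assert first_missing_positive([1, 2, 0]) == 3
assert first_missing_positive([3, 4, -1, 1]) == 2
assert first_missing_positive([7, 8, 9, 11, 12]) == 1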
|
CompetitiveCodingLeetcode/LeetcodeEasy
|
Hard/FirstMissingPositive_Q41.py
|
FirstMissingPositive_Q41.py
|
py
| 1,307 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
}
] |
74494631546
|
"""
Coin recognition, real life application
task: calculate the value of coins on picture
"""
import cv2
import numpy as np
def detect_coins():
coins = cv2.imread('../input_image/koruny.jpg', 1)
gray = cv2.cvtColor(coins, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(gray, 7)
circles = cv2.HoughCircles(
img, # source image
cv2.HOUGH_GRADIENT, # type of detection
1,
50,
param1=100,
param2=50,
minRadius=10, # minimal radius
maxRadius=380, # max radius
)
coins_copy = coins.copy()
for detected_circle in circles[0]:
x_coor, y_coor, detected_radius = detected_circle
coins_detected = cv2.circle(
coins_copy,
(int(x_coor), int(y_coor)),
int(detected_radius),
(0, 255, 0),
4,
)
cv2.imwrite("../output_image/coin_amount/koruny_test_Hough.jpg", coins_detected)
return circles
def calculate_amount():
koruny = {
"1 CZK": {
"value": 1,
"radius": 20,
"ratio": 1,
"count": 0,
},
"2 CZK": {
"value": 2,
"radius": 21.5,
"ratio": 1.075,
"count": 0,
},
"5 CZK": {
"value": 5,
"radius": 23,
"ratio": 1.15,
"count": 0,
},
"10 CZK": {
"value": 10,
"radius": 24.5,
"ratio": 1.225,
"count": 0,
},
"20 CZK": {
"value": 20,
"radius": 26,
"ratio": 1.3,
"count": 0,
},
"50 CZK": {
"value": 50,
"radius": 27.5,
"ratio": 1.375,
"count": 0,
},
}
circles = detect_coins()
radius = []
coordinates = []
for detected_circle in circles[0]:
x_coor, y_coor, detected_radius = detected_circle
radius.append(detected_radius)
coordinates.append([x_coor, y_coor])
smallest = min(radius)
tolerance = 0.0375
total_amount = 0
coins_circled = cv2.imread('../output_image/coin_amount/koruny_test_Hough.jpg', 1)
font = cv2.FONT_HERSHEY_SIMPLEX
for coin in circles[0]:
ratio_to_check = coin[2] / smallest
coor_x = coin[0]
coor_y = coin[1]
for koruna in koruny:
value = koruny[koruna]['value']
if abs(ratio_to_check - koruny[koruna]['ratio']) <= tolerance:
koruny[koruna]['count'] += 1
total_amount += koruny[koruna]['value']
cv2.putText(coins_circled, str(value), (int(coor_x), int(coor_y)), font, 1,
(0, 0, 0), 4)
print(f"The total amount is {total_amount} CZK")
for koruna in koruny:
pieces = koruny[koruna]['count']
print(f"{koruna} = {pieces}x")
cv2.imwrite("../output_image/coin_amount/koruny_hodnota.jpg", coins_circled)
if __name__ == "__main__":
calculate_amount()
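# Editor's addition: a worked example of the ratio matching above, using a
# hypothetical smallest radius of 150 px and a detected radius of 172 px.
_ratio = 172.0 / 150.0                      # ~1.1467
assert abs(_ratio - 1.15) <= 0.0375         # within tolerance of the 5 CZK ratio
assert abs(_ratio - 1.075) > 0.0375         # too far from the 2 CZK ratio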
|
tinazhouhui/computer_vision
|
image_analysis/coin_amount_calculate.py
|
coin_amount_calculate.py
|
py
| 3,054 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.medianBlur",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.HoughCircles",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.HOUGH_GRADIENT",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 116,
"usage_type": "call"
}
] |
34900553836
|
#!/usr/bin/env python
import argparse
import collections
import operator
import os
import re
UA_RE = re.compile(r'"(Mozilla[^"]*?)"')
def extract_log(file_obj, counts):
for line in file_obj:
m = UA_RE.search(line)
if not m:
continue
counts[m.groups()[0]] += 1
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', default='.')
    parser.add_argument('-c', '--count', type=int, default=10)
args = parser.parse_args()
counts = collections.defaultdict(int)
for fname in os.listdir(args.directory):
if fname.startswith('access.log'):
            with open(os.path.join(args.directory, fname)) as file_obj:
extract_log(file_obj, counts)
agents = list(
sorted(counts.items(), key=operator.itemgetter(1), reverse=True))
for agent, count in agents[:args.count]:
print('{:<7d} {}'.format(count, agent))
if __name__ == '__main__':
main()
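# Editor's addition: a quick self-check of UA_RE against a synthetic
# combined-log-format line.
_line = ('1.2.3.4 - - [10/Oct/2000:13:55:36 -0700] "GET / HTTP/1.0" 200 2326 '
         '"-" "Mozilla/5.0 (X11; Linux x86_64)"')
assert UA_RE.search(_line).groups()[0] == 'Mozilla/5.0 (X11; Linux x86_64)'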
|
eklitzke/nginx-ua-extract
|
extract.py
|
extract.py
|
py
| 946 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 33,
"usage_type": "call"
}
] |
43627672364
|
from typing import List
class Solution:
# Two pointers
def maximumScore(self, nums: List[int], k: int) -> int:
i, j = k, k
n = len(nums)
res, minVal = nums[k], nums[k]
while 0 < i or j < n-1:
if i == 0:
j += 1
elif j == n-1:
i -= 1
elif nums[i-1] < nums[j+1]:
j += 1
else:
i -= 1
minVal = min(minVal, nums[i], nums[j])
res = max(res, minVal * (j-i+1))
return res
# O(N*N) is not good enough :(
def maximumScore_own_TLE(self, nums: List[int], k: int) -> int:
res = float('-inf')
n = len(nums)
minVals = [[float('inf')]*n for _ in range(n)]
for i in range(n):
minVal = float('inf')
for j in range(i, -1, -1):
minVal = min(minVal, nums[j])
minVals[j][i] = minVal
for i in range(k, -1, -1):
for j in range(k, len(nums)):
res = max(res, (minVals[i][j]) * (j - i + 1))
return res
def test(self):
test_cases = [
[[1,4,3,7,4,5], 3],
[[5,5,4,5,4,1,1,1], 0],
]
for nums, k in test_cases:
res = self.maximumScore(nums, k)
print('res: %s' % res)
print('-=' * 30 + '-')
if __name__ == '__main__':
Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_1751_1800/LeetCode1793_MaximumScoreOfAGoodSubarray.py
|
LeetCode1793_MaximumScoreOfAGoodSubarray.py
|
py
| 1,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 24,
"usage_type": "name"
}
] |
73924482426
|
import re
from bs4 import BeautifulSoup
'''
Information to scrape:
1. Basic info
2. Author bio
3. Content summary
4. Original-text excerpts
5. Recommended e-books
6. Recommended books
7. Comments
'''
class Parser(object):
def __init__(self, soup):
self.title = soup.find('span', property="v:itemreviewed")
self.imgLink = soup.find('a', class_='nbg')
self.score = soup.find('div', class_='rating_self clearfix')
self.blockquote = soup.find('ul', class_='blockquote-list')
self.comments = soup.find('div', class_='comment-list new_score show')
self.moreInfo = soup.find('div', id="info")
        # Content summary
tmp = soup.find('div', class_="indent", id='link-report')
if tmp:
self.summary = tmp.find('span', class_='all hidden')
if self.summary is None:
self.summary = tmp.find('div', class_='intro')
else:
self.summary = None
        # Author bio
tmp = soup.find('div', class_='related_info').find('div', class_="indent", id=False)
if tmp:
self.author = tmp.find('span', class_='all hidden')
if self.author is None:
self.author = tmp.find('div', class_='intro')
else:
self.author = None
        # Recommended e-books
tmp = soup.find('div', id='rec-ebook-section')
if tmp:
self.recom_ebook = tmp.find('div', class_='content clearfix')
else:
self.recom_ebook = None
        # Recommended books
tmp = soup.find('div', id='db-rec-section')
if tmp:
self.recom_book = tmp.find('div', class_='content clearfix')
else:
self.recom_book = None
    # Top-level parse function
    def parse_all(self):
        # create a dict to hold the parsed information
        bookDict = dict()
        # collect every piece of information into bookDict
bookDict['基本信息'] = self.parse_info()
bookDict['内容简介'] = self.parse_summary()
bookDict['作者简介'] = self.parse_author()
bookDict['原文摘录'] = self.parse_blockquote()
bookDict['电子书推荐'] = self.parse_recom_ebook()
bookDict['书籍推荐'] = self.parse_recom_book()
bookDict['热评'] = self.parse_comment()
return bookDict
    # Parse basic info
def parse_info(self):
infoDict = dict()
        # 1. Title
        infoDict['标题'] = self.title.text  # the title should always be present
        # 2. Image link
if self.imgLink:
infoDict['图片链接'] = self.imgLink.img['src']
        # 3. Rating, stored as {'评分': ['<score>', '<number of raters>']}
if self.score:
score = self.score.text.split()
score[1] = score[1][:-3]
infoDict['评分'] = score
        # 5. Parse the additional info block
infoDict.update(self.parse_moreinfo())
return infoDict
    # Parse the additional info block
def parse_moreinfo(self):
infoDict = dict()
        for item in re.split('<br>|<br/>', str(self.moreInfo)):  # one field per item
item = BeautifulSoup(item, "html.parser")
item = re.sub('[ \n]', '', item.text)
if not item:
continue
item = item.split(':')
key = item[0]
value = item[1].strip()
infoDict[key] = value
return infoDict
    # Get the content summary
def parse_summary(self):
if not self.summary:
return None
summary = ''
for par in self.summary.find_all('p'):
par = par.text.replace('\u3000', '')
summary = summary + par + '\par'
return summary
    # Get the author bio
def parse_author(self):
if not self.author:
return None
author = ''
for par in self.author.find_all('p'):
par = par.text.replace('\u3000', '')
author = author + par + '\par'
return author
    # Get original-text excerpts
def parse_blockquote(self):
if not self.blockquote:
return None
blockquote = list()
[s.extract() for s in self.blockquote('div')]
for item in self.blockquote.find_all('figure'):
blockquote.append(item.text.strip().split(' (查看原文)')[0])
return blockquote
    # Related e-book recommendations
def parse_recom_ebook(self):
if not self.recom_ebook:
return None
recom_ebook = list()
for item in self.recom_ebook.find_all('dl'):
imgLink = item.img
imgLink = imgLink['src']
recom_ebook.append([item.text.split()[0], imgLink])
return recom_ebook
    # Related book recommendations
def parse_recom_book(self):
if not self.recom_book:
return None
recom_book = list()
for item in self.recom_book.find_all('dl'):
if not item.text:
continue
imgLink = item.img
imgLink = imgLink['src']
recom_book.append([item.text.split()[0], imgLink])
return recom_book
    # Comments
def parse_comment(self):
if not self.comments:
return None
comments = []
for item in self.comments.find_all('span', class_='short'):
comments.append(item.text)
return comments
    # Public entry point
    def run(self):
        # print('start parsing')
res = self.parse_all()
return res
        # to convert the dict to JSON format instead:
# return json.dumps(res, ensure_ascii=False, indent=1)
# if __name__ == '__main__':
# # open the file and parse it
# inputPath = r'C:\Users\31363\Desktop\Workspace\lab_web\book_spider\doc\demo.html'
# soup = BeautifulSoup(open(inputPath, encoding='utf8'), 'html.parser')
# parser = Parser(soup)
# parser.parse_all()
|
icecream-and-tea/labs_web
|
lab1/lab1_stage1/book_spider/src/html_parser.py
|
html_parser.py
|
py
| 5,863 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "re.split",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 101,
"usage_type": "call"
}
] |
17547705346
|
from keras.models import Model, load_model, save_model
from keras.layers import Input, Dense, Conv2D, Flatten, BatchNormalization, AveragePooling2D
from keras.activations import relu, softmax
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
import keras.initializers as initializers
class Actor():
def __init__(self, state_size, action_size, hyper_param={}, seed=714):
        # merge caller overrides over the defaults (the original discarded both arguments)
        hyper_param = {'lr': 1e-7, **hyper_param}
        self.seed = seed
self.state_size = state_size
self.action_size = action_size
state = Input(shape=self.state_size)
advantage = Input(shape=(1, ))
old_prediction = Input(shape=(self.action_size, ))
x = Conv2D(filters=20, kernel_size=(2, 2), strides=1, activation=relu, padding='same')(state)
x = AveragePooling2D()(x)
x = Conv2D(filters=20, kernel_size=(4, 4), strides=1, activation=relu, padding='same')(x)
x = AveragePooling2D()(x)
x = Flatten()(x)
x = Dense(units=512, activation=relu,
kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=.00002, seed=self.seed),
bias_initializer=initializers.Constant(0.1)
)(x)
actions_prob = Dense(units=action_size, activation=softmax,
name='output')(x)
model = Model(inputs=[state, advantage, old_prediction], outputs=actions_prob)
model.compile(optimizer=
# SGD(lr=hyper_param['lr']),
# RMSprop(lr=hyper_param['lr']),
Adam(lr=hyper_param['lr']),
loss=[self.proximal_policy_optimization_loss(
advantage=advantage,
old_prediction=old_prediction)])
model.summary()
self.model = model
def proximal_policy_optimization_loss(self, advantage, old_prediction):
LOSS_CLIPPING = 0.2
ENTROPY_LOSS = 0.007
def loss(y_true, y_pred):
prob = y_true * y_pred
old_prob = y_true * old_prediction
r = prob/(old_prob + 1e-10)
return -K.mean(
K.minimum(
r * advantage,
K.clip(
r,
min_value=1 - LOSS_CLIPPING,
max_value=1 + LOSS_CLIPPING
) * advantage
) + ENTROPY_LOSS * (prob * K.log(prob + 1e-10))
)
return loss
def save_model(self, name):
self.model.save(name)
def load_model(self, name):
# state = Input(shape=self.state_size)
advantage = Input(shape=(1, ))
old_prediction = Input(shape=(self.action_size, ))
model = load_model(name,
custom_objects={'loss':
self.proximal_policy_optimization_loss(
advantage=advantage,
old_prediction=old_prediction)})
self.model = model
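# Editor's addition: a tiny numpy illustration of the clipped surrogate used in
# the loss above (LOSS_CLIPPING = 0.2), independent of Keras.
import numpy as np

_r = np.array([0.5, 1.0, 1.5])                 # new/old probability ratios
_adv = np.array([1.0, 1.0, -1.0])              # advantages
_clipped = np.clip(_r, 1 - 0.2, 1 + 0.2)
_surrogate = np.minimum(_r * _adv, _clipped * _adv)   # -> [0.5, 1.0, -1.5]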
|
rlalpha/rl-trial
|
ppo/actor.py
|
actor.py
|
py
| 3,131 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "keras.layers.Input",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.activations.relu",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "keras.layers.AveragePooling2D",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "keras.activations.relu",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "keras.layers.AveragePooling2D",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.activations.relu",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "keras.initializers.RandomNormal",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "keras.initializers",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "keras.initializers.Constant",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.initializers",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.activations.softmax",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "keras.models.Model",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.Adam",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "keras.backend.mean",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "keras.backend.minimum",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "keras.backend.clip",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "keras.backend.log",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 73,
"usage_type": "call"
}
] |
70808563069
|
import torch
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
def calc_region(bbox, ratio, stride, featmap_size=None):
# Base anchor locates in (stride - 1) * 0.5
f_bbox = (bbox - (stride - 1) * 0.5) / stride
x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
return (x1, y1, x2, y2)
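# Worked example for calc_region (added comment, not from the original file):
# with stride = 8 and ratio = 0.25, bbox (16, 16, 80, 80) maps to feature-map
# coordinates f_bbox = (1.5625, 1.5625, 9.5625, 9.5625) and the function
# returns roughly the central half of the box, here (4, 4, 8, 8), clamped to
# the feature map when featmap_size is given.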
def anchor_ctr_inside_region_flags(anchors, stride, region):
x1, y1, x2, y2 = region
f_anchors = (anchors - (stride - 1) * 0.5) / stride
x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
return flags
def anchor_outside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
outside_flags = ~inside_flags
return outside_flags
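# Example (added comment, not from the original file): with allowed_border = 0
# an anchor (-2, 5, 30, 40) on a 100x100 image is flagged as outside because
# its x1 < 0; with allowed_border = 4 the same anchor counts as inside.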
class RegionAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
    Each proposal will be assigned `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
"""
# TODO update docs
def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
self.center_ratio = center_ratio
self.ignore_ratio = ignore_ratio
def assign(self,
mlvl_anchors,
mlvl_valid_flags,
gt_bboxes,
img_meta,
featmap_sizes,
anchor_scale,
anchor_strides,
gt_bboxes_ignore=None,
gt_labels=None,
allowed_border=0):
"""Assign gt to anchors.
This method assign a gt bbox to every bbox (proposal/anchor), each bbox
will be assigned with -1, 0, or a positive number. -1 means don't care,
0 means negative sample, positive number is the index (1-based) of
assigned gt.
        The assignment is done in the following steps; the order matters.
1. Assign every anchor to 0 (negative)
For each gt_bboxes:
2. Compute ignore flags based on ignore_region then
assign -1 to anchors w.r.t. ignore flags
3. Compute pos flags based on center_region then
assign gt_bboxes to anchors w.r.t. pos flags
4. Compute ignore flags based on adjacent anchor lvl then
assign -1 to anchors w.r.t. ignore flags
5. Assign anchor outside of image to -1
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
# TODO support gt_bboxes_ignore
if gt_bboxes_ignore is not None:
raise NotImplementedError
if gt_bboxes.shape[0] == 0:
raise ValueError('No gt bboxes')
num_gts = gt_bboxes.shape[0]
num_lvls = len(mlvl_anchors)
r1 = (1 - self.center_ratio) / 2
r2 = (1 - self.ignore_ratio) / 2
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) *
(gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
min_anchor_size = scale.new_full(
(1, ), float(anchor_scale * anchor_strides[0]))
target_lvls = torch.floor(
torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
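        # How the level mapping works (added comment, not from the original
        # file): scale is the geometric mean of the gt box sides and
        # min_anchor_size is the anchor size on the finest level. For example,
        # with anchor_scale = 8 and anchor_strides[0] = 4, min_anchor_size is
        # 32, and a 64x64 gt gives floor(log2(64) - log2(32) + 0.5) = 1, i.e.
        # the second pyramid level.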
# 1. assign 0 (negative) by default
mlvl_assigned_gt_inds = []
mlvl_ignore_flags = []
for lvl in range(num_lvls):
h, w = featmap_sizes[lvl]
assert h * w == mlvl_anchors[lvl].shape[0]
assigned_gt_inds = gt_bboxes.new_full((h * w, ),
0,
dtype=torch.long)
ignore_flags = torch.zeros_like(assigned_gt_inds)
mlvl_assigned_gt_inds.append(assigned_gt_inds)
mlvl_ignore_flags.append(ignore_flags)
for gt_id in range(num_gts):
lvl = target_lvls[gt_id].item()
featmap_size = featmap_sizes[lvl]
stride = anchor_strides[lvl]
anchors = mlvl_anchors[lvl]
gt_bbox = gt_bboxes[gt_id, :4]
# Compute regions
ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)
ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)
# 2. Assign -1 to ignore flags
ignore_flags = anchor_ctr_inside_region_flags(
anchors, stride, ignore_region)
mlvl_assigned_gt_inds[lvl][ignore_flags > 0] = -1
# 3. Assign gt_bboxes to pos flags
pos_flags = anchor_ctr_inside_region_flags(anchors, stride,
ctr_region)
mlvl_assigned_gt_inds[lvl][pos_flags > 0] = gt_id + 1
# 4. Assign -1 to ignore adjacent lvl
if lvl > 0:
d_lvl = lvl - 1
d_anchors = mlvl_anchors[d_lvl]
d_featmap_size = featmap_sizes[d_lvl]
d_stride = anchor_strides[d_lvl]
                d_ignore_region = calc_region(gt_bbox, r2, d_stride,
                                              d_featmap_size)
ignore_flags = anchor_ctr_inside_region_flags(
d_anchors, d_stride, d_ignore_region)
mlvl_ignore_flags[d_lvl][ignore_flags > 0] = 1
if lvl < num_lvls - 1:
u_lvl = lvl + 1
u_anchors = mlvl_anchors[u_lvl]
u_featmap_size = featmap_sizes[u_lvl]
u_stride = anchor_strides[u_lvl]
                u_ignore_region = calc_region(gt_bbox, r2, u_stride,
                                              u_featmap_size)
ignore_flags = anchor_ctr_inside_region_flags(
u_anchors, u_stride, u_ignore_region)
mlvl_ignore_flags[u_lvl][ignore_flags > 0] = 1
# 4. (cont.) Assign -1 to ignore adjacent lvl
for lvl in range(num_lvls):
ignore_flags = mlvl_ignore_flags[lvl]
mlvl_assigned_gt_inds[lvl][ignore_flags > 0] = -1
# 5. Assign -1 to anchor outside of image
flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
flat_anchors = torch.cat(mlvl_anchors)
flat_valid_flags = torch.cat(mlvl_valid_flags)
assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
flat_valid_flags.shape[0])
outside_flags = anchor_outside_flags(flat_anchors, flat_valid_flags,
img_meta['img_shape'],
allowed_border)
flat_assigned_gt_inds[outside_flags] = -1
if gt_labels is not None:
assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
            pos_flags = flat_assigned_gt_inds > 0
assigned_labels[pos_flags] = gt_labels[
flat_assigned_gt_inds[pos_flags] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
|
thangvubk/Cascade-RPN
|
mmdet/core/bbox/assigners/region_assigner.py
|
region_assigner.py
|
py
| 8,816 |
python
|
en
|
code
| 177 |
github-code
|
6
|
[
{
"api_name": "torch.round",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.round",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.round",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.round",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base_assigner.BaseAssigner",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.sqrt",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "torch.floor",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.log2",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros_like",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "assign_result.AssignResult",
"line_number": 208,
"usage_type": "call"
}
] |
40466806630
|
import pyvirtualcam
import cv2
import time
from filters import Filters
import math
from datetime import datetime
import ML.HandTrackingModule as htm
class VCam:
def __init__(self, mxhand, video, f, detCon=0.5, cw=640, ch=480, du=True):
cv2.namedWindow('feedback')
self.videocap = video
self.filterList = ['normal', 'negative', 'bgr2gray']
self.filterIndex = 0
self.inputKey = -1
# Utils
self.toDU = du
self.nextX, self.nextY = cw - 40, ch // 2
self.prevX, self.prevY = 40, ch // 2
self.escX, self.escY = cw - 40, 40
self.radius = 40
# Hand tracking
self.detector = htm.HandDetector(detectionCon=detCon, maxHands=mxhand)
self.finger = f
self.pressing = False
self.initialTime = datetime.timestamp(datetime.now())
self.vc = cv2.VideoCapture(self.videocap)
if not self.vc.isOpened():
            raise RuntimeError('Can\'t open your camera, please check that videocap is a valid device; try using "v4l2-ctl --list-devices"')
self.vc.set(cv2.CAP_PROP_FRAME_WIDTH, cw)
self.vc.set(cv2.CAP_PROP_FRAME_HEIGHT, ch)
self.vc.set(cv2.CAP_PROP_FPS, 30)
# Query final capture device values (may be different from preferred settings).
self.width = int(self.vc.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps_out = self.vc.get(cv2.CAP_PROP_FPS)
self.ret, self.frame = self.vc.read()
if not self.ret:
raise RuntimeError('Error fetching frame')
self.display = True
def start(self):
with pyvirtualcam.Camera(self.width, self.height, self.fps_out, print_fps=False, fmt=pyvirtualcam.PixelFormat.BGR,) as cam:
print(f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)')
pTime, cTime = 0,0
while self.display:
# Read frame from webcam.
self.ret, self.frame = self.vc.read()
                if not self.ret:
                    raise RuntimeError('Error fetching frame')
                self.frame = cv2.flip(self.frame, 1)
# Hand track control
self.handCommands()
self.inputKey = cv2.waitKey(1)
if self.inputKey != -1:
self.camInputs()
                filter_name = self.filterList[self.filterIndex]
                self.frame = getattr(Filters, filter_name)(self.frame)
cTime = time.time()
fps = int(1 / (cTime - pTime))
pTime = cTime
cv2.putText(self.frame, str(fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cam.send(self.frame)
if self.toDU:
self.drawUtils()
self.detector.drawMarks(self.frame, drawFingerMark=[self.finger])
cv2.imshow('feedback', self.frame)
print('Virtual camera closed')
def handCommands(self):
self.detector.findHands(self.frame)
lmList, bbox = self.detector.findPosition(self.frame)
if lmList:
fingerX, fingerY = lmList[self.finger][1], lmList[self.finger][2]
init = datetime.timestamp(datetime.now())
# next filter
if math.hypot(fingerX - self.nextX, fingerY - self.nextY) <= 30:
actual = datetime.timestamp(datetime.now())
if not self.pressing:
self.pressing = True
self.initialTime = init
else:
presstime = actual - self.initialTime
if presstime >= 1:
self.filterIndex = (self.filterIndex + 1) % len(self.filterList)
self.pressing = False
            # previous filter
elif math.hypot(fingerX - self.prevX, fingerY - self.prevY) <= 30:
actual = datetime.timestamp(datetime.now())
if not self.pressing:
self.pressing = True
self.initialTime = init
else:
presstime = actual - self.initialTime
if presstime >= 1:
self.filterIndex = (self.filterIndex - 1) % len(self.filterList)
self.pressing = False
# close cam
elif math.hypot(fingerX - self.escX, fingerY - self.escY) <= 30:
actual = datetime.timestamp(datetime.now())
if not self.pressing:
self.pressing = True
self.initialTime = init
else:
presstime = actual - self.initialTime
if presstime >= 2:
self.vc.release()
self.display = False
self.pressing = False
else:
self.pressing = False
self.initialTime = init
def camInputs(self):
# ESC
if self.inputKey == 27:
cv2.destroyWindow('feedback')
self.vc.release()
self.display = False
# [
elif self.inputKey == 91:
self.filterIndex = (self.filterIndex - 1) % len(self.filterList)
# ]
elif self.inputKey == 93:
self.filterIndex = (self.filterIndex + 1) % len(self.filterList)
def drawUtils(self):
# Drawing area for hand tracker commands
cv2.circle(self.frame, (self.nextX, self.nextY), self.radius, (255, 0, 0))
cv2.circle(self.frame, (self.prevX, self.prevY), self.radius, (255, 0, 0))
cv2.circle(self.frame, (self.escX, self.escY), self.radius, (255, 0, 0))
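# Illustrative usage sketch (added comment, not part of the original file).
# The capture device index 0 and fingertip landmark 8 (MediaPipe index
# fingertip) are assumptions:
#
#     if __name__ == '__main__':
#         cam = VCam(mxhand=1, video=0, f=8)
#         cam.start()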
|
biguelito/funcam
|
vcam.py
|
vcam.py
|
py
| 5,797 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.namedWindow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ML.HandTrackingModule.HandDetector",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ML.HandTrackingModule",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.timestamp",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pyvirtualcam.Camera",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pyvirtualcam.PixelFormat",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.flip",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "filters.Filters",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "time.time",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.timestamp",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "math.hypot",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.timestamp",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "math.hypot",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.timestamp",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "math.hypot",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.timestamp",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "cv2.destroyWindow",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 164,
"usage_type": "call"
}
] |
15273361459
|
from . import TestCase
from flask import url_for
from .. import db
from ...models import User
class UsersTest(TestCase):
render_templates = False
def test_list_users(self):
self._create_user()
response = self.as_user('get', url_for("users"))
self.assertEquals(1, len(response.json['_embedded']['users']))
def test_list_users_paginate(self):
for i in range(1, 30):
self._create_user(i)
response = self.as_user('get', url_for("users"))
self.assertEquals(25, len(response.json['_embedded']['users']))
next_link = response.json['_links']['next']['href']
self.assertEquals("/users?page=2", next_link)
response = self.as_user('get', next_link)
self.assertFalse('next' in response.json['_links'])
def test_view_user(self):
self._create_user()
response = self.as_user('get', url_for("user", id=1))
self.assertEquals("[email protected]", response.json['email'])
def test_add_user(self):
        data = {'email': '[email protected]'}
self.as_user('post', url_for("users"), data=data)
u = User.query.get(1)
        self.assertEquals('[email protected]', u.email)
def test_add_user_invalid(self):
data = {}
response = self.as_user('post', url_for("users"), data=data)
self.assert400(response)
message = "Error in the email field - This field is required."
self.assertEquals(message, response.json['message'])
def test_edit_user(self):
u = self._create_user()
        data = {'email': '[email protected]', 'first_name': 'First Name'}
        self.as_user('put', url_for("user", id=1), data=data)
        self.assertEquals('First Name', u.first_name)
def test_delete_user(self):
u = self._create_user()
self.as_user('delete', url_for("user", id=1))
self.assertEqual(False, u.active)
def _create_user(self, id=1):
u = User()
u.id = id
u.email = "user%[email protected]" % id
u.password = "Password"
db.session.add(u)
return u
|
juokaz/flask-skeleton
|
website/api/tests/users_test.py
|
users_test.py
|
py
| 2,120 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.url_for",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.User.query.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.User.query",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "flask.url_for",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "models.User",
"line_number": 74,
"usage_type": "call"
}
] |
27009005628
|
from scipy.io import loadmat
import numpy as np
import xlrd as x
import pandas as pd
def run(file, delimiter):
file_name = file["file"]
file_type_list = file_name.split(".")
file_type = file_type_list[len(file_type_list) - 1]
if file_type == 'mat':
key = file["key"]
array = read_mat(file_name, key)
elif file_type == 'csv':
array = read_csv(file_name, delimiter)
elif file_type == 'txt':
array = read_csv(file_name, delimiter)
elif file_type == 'xlsx':
array = read_xls(file_name)
elif file_type == 'xls':
array = read_xls(file_name)
else:
array = np.array([[]])
return {"array": array.tolist()}
def read_mat(file, matKey):
    mat_dict = loadmat(file)
    return mat_dict[matKey]
def read_csv(file, delimiter):
array = np.loadtxt(file, delimiter=delimiter)
return np.array(array, dtype=float)
def read_xls(file):
array = pd.read_excel(file, sheet_name=0, header=None)
return np.array(array, dtype=float)
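# Illustrative usage sketch (added comment, not part of the original file);
# the file names and the .mat key are assumptions:
#
#     run({"file": "data.csv"}, delimiter=",")   # -> {"array": [[...], ...]}
#     run({"file": "data.mat", "key": "X"}, delimiter=None)  # "key" selects the matrix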
|
lisunshine1234/mlp-algorithm-python
|
data/read/read/run.py
|
run.py
|
py
| 1,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 42,
"usage_type": "call"
}
] |
35508636359
|
#START{
import os
from github import Github
import json
import sys
import re
import time
from tabulate import tabulate
def clone_repos(GITHUB_ACCESS_TOKEN,GITHUB_USERNAME):
g = Github(GITHUB_ACCESS_TOKEN)
# Create "repos" folder if it doesn't exist
if not os.path.exists("repos"):
os.makedirs("repos")
# Create "public" and "private" folders within "repos"
public_folder = os.path.join("repos", "public")
private_folder = os.path.join("repos", "private")
if not os.path.exists(public_folder):
os.makedirs(public_folder)
if not os.path.exists(private_folder):
os.makedirs(private_folder)
# Clone all public repositories owned by the user
for repo in g.get_user().get_repos(affiliation='owner'):
if not repo.private:
os.makedirs(os.path.join(public_folder), exist_ok=True)
if os.path.exists(os.path.join(public_folder, repo.name)):
pass
else:
os.system(f"git clone {repo.clone_url} {os.path.join(public_folder, repo.name)}")
os.system(f"rm -rf {os.path.join(public_folder, repo.name, '.git')}")
# Clone all private repositories owned by the user
for repo in g.get_user().get_repos(affiliation='owner'):
if repo.private:
os.makedirs(os.path.join(private_folder), exist_ok=True)
# Include the access token and username in the clone URL to avoid being prompted for them
if os.path.exists(os.path.join(private_folder, repo.name)):
pass
else:
os.system(f"git clone https://{GITHUB_USERNAME}:{GITHUB_ACCESS_TOKEN}@{repo.clone_url.split('://')[1]} {os.path.join(private_folder, repo.name)}")
os.system(f"rm -rf {os.path.join(private_folder, repo.name, '.git')}")
def is_binary_file(filepath):
with open(filepath, 'rb') as f:
chunk = f.read(1024)
if b'\0' in chunk:
return True
return False
def count_lines(filepath, language):
total_lines = 0
code_lines = 0
comment_lines = 0
empty_lines = 0
with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
for line in f:
total_lines += 1
line = line.strip()
if not line:
empty_lines += 1
elif re.match(language['comment_regex'], line):
comment_lines += 1
else:
code_lines += 1
return (total_lines, code_lines, comment_lines, empty_lines)
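# Shape of languages.json assumed by count_lines() and get_language(),
# inferred from the lookups above rather than taken from the repository:
#
#     {"python": {"name": "Python",
#                 "extensions": [".py"],
#                 "comment_regex": "^#"}}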
def get_language(filepath, languages):
for language in languages.values():
for extension in language['extensions']:
if filepath.endswith(extension):
return language
return None
def get_filetypes(dirpath):
filetypes = {}
for root, dirs, files in os.walk(dirpath):
for file in files:
filepath = os.path.join(root, file)
if not is_binary_file(filepath):
ext = os.path.splitext(file)[1]
if ext not in filetypes:
filetypes[ext] = 0
filetypes[ext] += 1
return filetypes
def main():
dirpath = "./repos"
if not os.path.isdir(dirpath):
print("Invalid directory path")
return
with open("languages.json") as f:
languages = json.load(f)
total_lines = 0
total_code_lines = 0
total_comment_lines = 0
total_empty_lines = 0
lang_lines = {}
filetypes = get_filetypes(dirpath)
new_dict = {}
for key in languages:
name = languages[key]["name"].upper()
new_dict[name] = 0
for root, dirs, files in os.walk(dirpath):
for file in files:
filepath = os.path.join(root, file)
if not is_binary_file(filepath):
language = get_language(filepath, languages)
if language:
(total, code, comment, empty) = count_lines(filepath, language)
total_lines += total
total_code_lines += code
total_comment_lines += comment
total_empty_lines += empty
lang_name = language["name"].upper()
new_dict[f"{lang_name}"] += 1
if lang_name not in lang_lines:
lang_lines[lang_name] = {'total': 0, 'code': 0, 'comment': 0, 'empty': 0}
lang_lines[lang_name]['total'] += total
lang_lines[lang_name]['code'] += code
lang_lines[lang_name]['comment'] += comment
lang_lines[lang_name]['empty'] += empty
total_files = sum(new_dict.values())
#all_var=""
#all_var+="Language Files Total Lines Code Lines Comment Lines Empty Lines"+"\n"
#all_var+="-"*84+"\n"
data = []
for lang, lines in lang_lines.items():
total = lines['total']
code = lines['code']
comment = lines['comment']
empty = lines['empty']
data.append({'Language': f'{lang}', 'Files': new_dict[f"{lang}"], 'Total Lines': total, 'Code Lines': code, 'Comment Lines': comment, 'Empty Lines': empty})
#all_var+="{:<12}{:<9}{:<17}{:<17}{:<19}{}".format(lang, new_dict[f"{lang}"], total, code, comment, empty)+"\n"
#all_var+="-"*84+"\n"
#all_var+="{:<12}{:<9}{:<17}{:<17}{:<19}{}".format("TOTAL", total_files, total_lines, total_code_lines, total_comment_lines, total_empty_lines)
#data.append({'Language': 'TOTAL', 'Files': total_files, 'Total Lines': total_lines, 'Code Lines': total_code_lines, 'Comment Lines': total_comment_lines, 'Empty Lines': total_empty_lines})
return data
def format_table(data):
headers = ['Language', 'Files', 'Total Lines', 'Code Lines', 'Comment Lines', 'Empty Lines']
# Sort the data by Total Lines in descending order
sorted_data = sorted(data, key=lambda x: x['Total Lines'], reverse=True)
table = []
for d in sorted_data:
row = [d['Language'], d['Files'], d['Total Lines'], d['Code Lines'], d['Comment Lines'], d['Empty Lines']]
table.append(row)
table.append(['TOTAL', sum(d['Files'] for d in data), sum(d['Total Lines'] for d in data),
sum(d['Code Lines'] for d in data), sum(d['Comment Lines'] for d in data), sum(d['Empty Lines'] for d in data)])
return tabulate(table, headers, tablefmt='pipe')
if __name__ == '__main__':
    if len(sys.argv) < 3:
print(f"Usage: python {os.path.basename(__file__)} 'GITHUB_ACCESS_TOKEN' 'GITHUB_USERNAME'")
exit()
else:
clone_repos(sys.argv[1],sys.argv[2])
all_var=format_table(main())
with open('README.md', 'w') as f:
# Get current date and time in seconds since the epoch
seconds_since_epoch = time.time()
# Format the value as a date and time string
date_time_string = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(seconds_since_epoch))
# Print the value
print(date_time_string)
f.write(all_var)
f.write("\n\nLast Update: "+date_time_string)
print(all_var)
#}END.
|
TAFFAHACHRAF/TAFFAHACHRAF
|
main.py
|
main.py
|
py
| 7,201 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "github.Github",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "tabulate.tabulate",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 180,
"usage_type": "call"
}
] |
40005326365
|
import pytest
from xdlang.structures import XDType, ast
from xdlang.visitors.parser import parse_text, transform_parse_tree
def parse_and_transform_expr(program_text: str):
parsed = parse_text(program_text, start="expr")
ast = transform_parse_tree(parse_tree=parsed)
return ast
@pytest.mark.parametrize(
"text,type,value",
[
("42", XDType.INT, 42),
("17.00", XDType.FLOAT, 17.0),
("'Q'", XDType.CHAR, "Q"),
("false", XDType.BOOL, False),
("true", XDType.BOOL, True),
],
)
def test_literal(text, type, value):
node: ast.LiteralNode = parse_and_transform_expr(text)
assert isinstance(node, ast.LiteralNode)
assert node.type == type
assert node.value == value
|
mbednarski/xdlang
|
tests/ast/test_ast_literal.py
|
test_ast_literal.py
|
py
| 742 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "xdlang.visitors.parser.parse_text",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "xdlang.structures.ast",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "xdlang.visitors.parser.transform_parse_tree",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "xdlang.structures.ast",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "xdlang.structures.ast.LiteralNode",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.ast",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "xdlang.structures.ast.LiteralNode",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.ast",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.XDType.INT",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.XDType",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "xdlang.structures.XDType.FLOAT",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.XDType",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "xdlang.structures.XDType.CHAR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.XDType",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "xdlang.structures.XDType.BOOL",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.XDType",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "xdlang.structures.XDType.BOOL",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "xdlang.structures.XDType",
"line_number": 20,
"usage_type": "name"
}
] |
10220508565
|
from time import time
from nazurin.database import Database
from nazurin.models import Illust
from .api import Zerochan
from .config import COLLECTION
patterns = [
# https://www.zerochan.net/123456
r"zerochan\.net/(\d+)",
# https://s1.zerochan.net/Abcdef.600.123456.jpg
# https://static.zerochan.net/Abcdef.full.123456.jpg
r"zerochan\.net/\S+\.(\d+)\.\w+$",
]
async def handle(match) -> Illust:
post_id = match.group(1)
api = Zerochan()
db = Database().driver()
collection = db.collection(COLLECTION)
illust = await api.view(post_id)
illust.metadata["collected_at"] = time()
await collection.insert(int(post_id), illust.metadata)
return illust
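# Example (added comment, not from the original file): for the URL
# https://www.zerochan.net/123456 the first pattern captures post_id "123456";
# handle() then fetches the post, stamps metadata["collected_at"] with the
# current time, stores the metadata under the post id and returns the Illust.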
|
y-young/nazurin
|
nazurin/sites/zerochan/interface.py
|
interface.py
|
py
| 702 |
python
|
en
|
code
| 239 |
github-code
|
6
|
[
{
"api_name": "api.Zerochan",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nazurin.database.Database",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "config.COLLECTION",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "api.view",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nazurin.models.Illust",
"line_number": 18,
"usage_type": "name"
}
] |
21867315415
|
import phywhisperer.interface.naeusb as NAE
import phywhisperer.interface.program_fpga as LLINT
import os
import re
import logging
import pkg_resources
import threading
import time
from phywhisperer.interface.bootloader_sam3u import Samba
from phywhisperer.sniffer import USBSniffer, USBSimplePrintSink
from phywhisperer.protocol import PWPacketDispatcher, PWPacketHandler, IncompletePacket
from zipfile import ZipFile
from phywhisperer.firmware.phywhisperer import getsome
class Usb(PWPacketDispatcher):
"""PhyWhisperer-USB Interface"""
MAX_PATTERN_LENGTH = 64
def __init__ (self, viewsb=False):
""" Set up PhyWhisperer-USB device.
Args:
viewsb: Should only be set to 'True' when this is called by ViewSB.
"""
self.viewsb = viewsb
self.addpattern = False
self.short_timestamps = [0] * 2**3
self.long_timestamps = [0] * 2**16
self.stat_pattern_match_value = 0
self.capture_size = 8188 # default to FIFO size
self.usb_trigger_freq = 240E6 #internal frequency used for trigger ticks
self.entries_captured = 0
self.expected_verilog_matches = 80
self.slurp_defines()
# Set up the PW device to handle packets in ViewSB:
if viewsb:
super().__init__(verbose=False)
self.sniffer = USBSniffer()
self.register_packet_handler(self.sniffer)
def slurp_defines(self):
""" Parse Verilog defines file so we can access register and bit
definitions by name and avoid 'magic numbers'.
"""
self.verilog_define_matches = 0
defines_files = [pkg_resources.resource_filename('phywhisperer', 'firmware/defines_pw.v'),
pkg_resources.resource_filename('phywhisperer', 'firmware/defines_usb.v')]
for i,defines_file in enumerate(defines_files):
defines = open(defines_file, 'r')
define_regex_base = re.compile(r'`define')
define_regex_reg = re.compile(r'`define\s+?REG_')
define_regex_radix = re.compile(r'`define\s+?(\w+).+?\'([bdh])([0-9a-fA-F]+)')
define_regex_noradix = re.compile(r'`define\s+?(\w+?)\s+?(\d+?)')
for define in defines:
if define_regex_base.search(define):
reg = define_regex_reg.search(define)
match = define_regex_radix.search(define)
if reg:
if i == 0:
block_offset = self.MAIN_REG_SELECT << 6
else:
block_offset = self.USB_REG_SELECT << 6
else:
block_offset = 0
if match:
self.verilog_define_matches += 1
if match.group(2) == 'b':
radix = 2
elif match.group(2) == 'h':
radix = 16
else:
radix = 10
setattr(self, match.group(1), int(match.group(3),radix) + block_offset)
else:
match = define_regex_noradix.search(define)
if match:
self.verilog_define_matches += 1
setattr(self, match.group(1), int(match.group(2),10) + block_offset)
else:
logging.warning("Couldn't parse line: %s", define)
defines.close()
assert self.verilog_define_matches == self.expected_verilog_matches, "Trouble parsing Verilog defines files: didn't find the right number of defines (expected %d, got %d)." % (self.expected_verilog_matches, self.verilog_define_matches)
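    # Example of what slurp_defines() produces (added comment, not from the
    # original file; the register value is illustrative): a line such as
    #   `define REG_ARM 'h5
    # becomes the attribute self.REG_ARM = 0x5 plus the register block offset,
    # so later code can address registers by name, e.g.
    # self.write_reg(self.REG_ARM, [1]).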
def con(self, PID=0xC610, sn=None, program_fpga=True, bitstream_file=None):
"""Connect to PhyWhisperer-USB. Raises error if multiple detected
Args:
PID (int, optional): USB PID of PhyWhisperer, defaults to 0xC610 (NewAE standard).
            sn (int, optional): Serial Number of PhyWhisperer, required when multiple
                PhyWhisperers are connected.
            program_fpga (bool, optional): Specifies whether or not to program the FPGA with
                the default firmware when we connect. Set to False if using custom bitstream.
"""
self.usb = NAE.NAEUSB()
self.usb.con(idProduct=[PID], serial_number=sn)
self._llint = LLINT.PhyWhispererUSB(self.usb)
if program_fpga:
if bitstream_file is None:
with ZipFile(getsome("phywhisperer-firmware.zip")) as myzip:
with myzip.open('phywhisperer_top.bit') as bitstream:
self._llint.FPGAProgram(bitstream)
pass
else:
print("Programming custom bit stream '%s'" % bitstream_file)
with open(bitstream_file,"rb") as bitstream:
self._llint.FPGAProgram(bitstream)
self.write_reg(self.REG_COUNT_WRITES, [1])
def set_power_source(self, src):
"""Set power source for target.
Args:
src (str):
* "5V" for power from this computer (via 'Control' USB port).
* "host" for power from the host of the connection we're sniffing.
* "off" for no power.
"""
if src == "5V":
self._llint.changePowerSource(self._llint.PWR_SRC_5V)
pass
elif src == "host":
self._llint.changePowerSource(self._llint.PWR_SRC_HOST)
pass
elif src == "off" or src is None or src == False:
self._llint.changePowerSource(self._llint.PWR_SRC_OFF)
pass
else:
raise AttributeError("Unknown source %s, valid sources: '5V', 'host', 'off'")
def reset_fpga(self):
""" Reset FPGA registers to defaults, use liberally to clear incorrect states.
"""
#self._llint.resetFPGA()
self.write_reg(self.REG_RESET_REG, [1])
self.write_reg(self.REG_RESET_REG, [0])
self.write_reg(self.REG_COUNT_WRITES, [1])
def load_bitstream(self, bitfile):
"""Load bitstream onto FPGA"""
if not os.path.isfile(bitfile):
raise ValueError("Cannot find specified bitfile {}".format(bitfile))
bitstream = open(bitfile, "rb")
self._llint.FPGAProgram(bitstream)
pass
def auto_program(self):
""" Erases the firmware of the onboard SAM3U, and reprograms it with default firmware
Attempts to autodetect the COM PORT that the SAM3U shows up as. If this fails, it will
be necessary to flash new firmware via :code:`program_sam3u`
"""
import time, serial.tools.list_ports
before = serial.tools.list_ports.comports()
before = [b.device for b in before]
time.sleep(0.5)
self.erase_sam3u()
time.sleep(1.5)
after = serial.tools.list_ports.comports()
after = [a.device for a in after]
candidate = list(set(before) ^ set(after))
if len(candidate) == 0:
raise OSError("Could not detect COMPORT. Continue using programmer.program()")
com = candidate[0]
print("Detected com port {}".format(com))
self.program_sam3u(com)
def erase_sam3u(self):
"""Erase the SAM3U Firmware, which forces it into bootloader mode."""
self._llint.eraseFW(confirm=True)
def program_sam3u(self, port, fw_path=None):
"""Program the SAM3U Firmware assuming device is in bootloader mode.
Args:
port (str): Serial port name, such as 'COM36' or '/dev/ttyACM0'.
fw_path (str): Path to firmware binary to program the sam3u with.
                If None, use default firmware. Defaults to None.
"""
fw_data = None
print("Opening firmware...")
if fw_path is None:
print("Firmware not specified. Using firmware/phywhisperer.py")
fw_data = getsome("phywhisperer-SAM3U1C.bin").read()
else:
if not os.path.isfile(fw_path):
raise ValueError("Cannot find specified firmware file {}".format(fw_path))
fw_data = open(fw_path, "rb").read()
sam = Samba()
print("Opened!\nConnecting...")
sam.con(port)
print("Connected!\nErasing...")
sam.erase()
print("Erased!\nProgramming file {}...".format(fw_path))
sam.write(fw_data)
print("Programmed!\nVerifying...")
if sam.verify(fw_data):
print("Verify OK!")
sam.flash.setBootFlash(True)
print("Bootloader disabled. Please power cycle device.")
else:
print("Verify FAILED!")
sam.ser.close()
def set_usb_mode(self, mode='auto'):
"""Set USB PHY speed.
Args:
mode (str):
* "LS": manually set the PHY to low speed.
* "FS": manually set the PHY to full speed.
* "HS": manually set the PHY to high speed.
* "auto": Default. PW will attempt to automatically determine the
speed when the target is connected. Mode must be set to
'auto' prior to connecting or powering up the target,
otherwise speed cannot be determined correctly. Setting
the mode to 'auto' actively causes PW to try to
determine the speed.
"""
if mode == 'auto':
self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_AUTO])
elif mode == 'LS':
self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_LS])
elif mode == 'FS':
self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_FS])
elif mode == 'HS':
self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_HS])
else:
raise ValueError('Invalid mode %s; specify auto, LS, FS, or HS.' % mode)
pass
def write_reg(self, address, data):
"""Write a PhyWhisperer register.
Args:
address: int
data: bytes
"""
return self.usb.cmdWriteMem(address, data)
def read_reg(self, address, size=1):
"""Reads a PhyWhisperer register.
Args:
address: int
size: int, number of bytes to read
Returns:
"""
return self.usb.cmdReadMem(address, size)
def get_usb_mode(self):
"""Returns USB PHY speed.
Return values:
- 'auto': the speed has not been determined yet (was the mode set
to 'auto' _before_ the target was connected or powered up?).
- 'LS': low speed
- 'FS': full speed
        - 'HS': high speed
"""
value = self.read_reg(self.REG_USB_SPEED)[0]
if value == self.USB_SPEED_AUTO:
return 'auto'
elif value == self.USB_SPEED_LS:
return 'LS'
elif value == self.USB_SPEED_FS:
return 'FS'
elif value == self.USB_SPEED_HS:
return 'HS'
else:
raise ValueError('Internal error: REG_USB_SPEED register contains invalid value %d.' % value)
def read_capture_data(self, entries=0, verbose=False, blocking=False, burst_size=8192, timeout=5):
"""Read from USB capture memory.
Args:
blocking (bool, optional):
* True: wait for data to be available before reading (slower).
* False: read immediately, with underflow protection, all of the captured
data, until PW tells us we've read everything that it captured ('entries' is ignored).
entries (int, optional): When blocking=True, number of capture entries to read. If not specified,
read all the captured data. Cannot be greater than capture size, as set
by set_capture_size().
burst_size (int, optional): When blocking=False, size of burst FIFO reads, defaults to 8192.
timeout (int, optional): timeout in seconds (ignored if 0, defaults to 5)
verbose (bool, optional): Print extra debug info.
Returns: List of captured entries. Each list element is itself a 3-element list,
containing the 3 bytes that make up a capture entry. Can be parsed by split_packets()
or split_data(). See software/phywhisperer/firmware/defines.v for definition of the FIFO
data fields.
"""
data = []
starttime = time.time()
self.entries_captured = 0
if blocking:
entries_read = 0
if not entries:
entries = self.capture_size
elif entries > self.capture_size:
raise ValueError('Error: requested to read %d entries but only %d were captured.' % (entries, self.capture_size))
while entries_read < entries:
while self.fifo_empty():
if timeout and time.time() - starttime > timeout:
logging.warning("Capture timed out!")
break
data.append(self.read_reg(self.REG_SNIFF_FIFO_RD, 4)[1:4])
entries_read += 1
else:
notdone = True
early_exit = False
raw = []
while notdone:
raw.extend(self.read_reg(self.REG_SNIFF_FIFO_RD, 4*burst_size))
# check CAPTURE_DONE and EMPTY flags on last entry read:
bitmask = 2**self.FE_FIFO_STAT_CAPTURE_DONE + 2**self.FE_FIFO_STAT_EMPTY
if raw[-1] & bitmask == bitmask:
notdone = False
# did we also overflow?
if raw[-1] & 2**self.FE_FIFO_STAT_OVERFLOW_BLOCKED:
logging.warning("FIFO overflowed, capture stopped.")
early_exit = True
elif timeout and time.time() - starttime > timeout:
logging.warning("Capture timed out!")
notdone = False
early_exit = True
# reformat the return array and at the same time, filter out the (possibly numerous) empty FIFO reads:
for i in range(int(len(raw)/4)):
if raw[i*4+3] & 3 != self.FE_FIFO_CMD_STRM:
data.append(raw[i*4+1:i*4+4])
self.entries_captured = len(data)
if early_exit:
logging.warning("%d entries captured." % self.entries_captured)
if len(data): # maybe we only got empty reads
if data[-1][2] & 2**self.FE_FIFO_STAT_UNDERFLOW:
logging.warning("Capture FIFO underflowed!")
return data
def split_data(self, rawdata, verbose=False):
"""Split raw USB capture data into data events and times, stat events and times.
Args:
rawdata: list of lists, e.g. obtained from read_capture_data()
Returns:
4-tuple of lists:
0. data event times
1. data bytes corresponding to data event times
2. USB status update times
3. USB status bytes corresponding to status update times
"""
timestep = 0
data_bytes = []
data_times = []
stat_bytes = []
stat_times = []
last_flags = 0xff
for raw in rawdata:
command = raw[2] & 0x3
if (command == self.FE_FIFO_CMD_DATA):
data = raw[1]
ts = raw[0] & 0x7
self.short_timestamps[ts] += 1
timestep += ts
flags = (raw[0] & 0xf8) >> 3
if verbose:
print("%8d flags=%02x data=%02x"%(timestep, flags, data))
# only log flags if they've changed:
if flags != last_flags:
stat_bytes.append(flags)
stat_times.append(timestep)
last_flags = flags
data_bytes.append(data)
data_times.append(timestep)
elif (command == self.FE_FIFO_CMD_STAT):
ts = raw[0] & 0x7
self.short_timestamps[ts] += 1
timestep += ts
flags = (raw[0] & 0xf8) >> 3
if verbose:
print("%8d flags=%02x"%(timestep, flags))
stat_bytes.append(flags)
stat_times.append(timestep)
last_flags = flags
elif (command == self.FE_FIFO_CMD_TIME):
ts = raw[0] + (raw[1] << 8)
self.long_timestamps[ts] += 1
#Unlike stat and data commands, we don't add one here; if we did
#we'd be overcounting in the common case where a time command immediately
            #precedes a stat or data command. Consequence is that timestep will be off
#by one in the case of lone time commands (which is rare, and inconsequential
#in practice).
timestep += ts
if verbose:
print("%8d" % timestep)
elif (command == self.FE_FIFO_CMD_STRM):
# nothing to do or report
# CAUTION: don't even print a status in verbose mode because we can be
# receiving TONS of these!
pass
else:
print ("ERROR: unknown command (%d)" % command)
return (data_times, data_bytes, stat_times, stat_bytes)
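    # FIFO entry layout as parsed by split_data() (added summary comment, not
    # from the original file): each 3-byte entry carries a 2-bit command in
    # raw[2] & 0x3. DATA entries hold the captured byte in raw[1], a 3-bit
    # timestamp delta in raw[0] & 0x7 and five status flags in the top bits of
    # raw[0]; TIME entries extend the timestamp by raw[0] + (raw[1] << 8).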
def split_packets(self, rawdata):
"""Split raw USB capture data into packets.
Args:
rawdata: list of lists, e.g. obtained from read_capture_data()
Returns:
list
Each list element is one packet and is presented in a dictionary with the following keys:
* 'timestamp'
* 'size' in bytes
* 'contents' list of bytes
"""
# operates destructively so make a copy:
rawdata_copy = rawdata[:]
handler = PWPacketHandler()
packets = []
incomplete = False
while rawdata_copy and not incomplete:
# use ViewSB code to avoid duplicating it here:
try:
packets.append(handler.handle_bytes_received(defines=self, data=rawdata_copy))
except IncompletePacket:
incomplete = True
continue
return packets
def print_packets(self, packets):
"""Print packets using USBSimplePrintSink from ViewSB.
Args:
packets: list of dictionaries, e.g. obtained from split_packets()
"""
printer = USBSimplePrintSink(highspeed=self.get_usb_mode() == 'HS')
for packet in packets:
printer.handle_usb_packet(ts=packet['timestamp'], buf=bytearray(packet['contents']), flags=(packet['flags']))
@staticmethod
def print_flags(stat_byte):
"""Print bitfields of USB status flags byte.
"""
print('vbus_valid = %d' % (1 if stat_byte & 0x10 else 0))
print('sess_end = %d' % (1 if stat_byte & 0x08 else 0))
print('sess_valid = %d' % (1 if stat_byte & 0x04 else 0))
print('rx_error = %d' % (1 if stat_byte & 0x02 else 0))
print('rx_active = %d' % (1 if stat_byte & 0x01 else 0))
def set_capture_size(self, size=8188):
"""Set how many events to capture (events include data, USB status, and timestamps).
Args:
            size (int, optional): number of events to capture. 0 = unlimited (until overflow). Max = 2^24-1. Since the capture FIFO can hold 8188 events, setting this to > 8188 may result in overflow.
"""
if (size >= 2**24) or (size < 0):
raise ValueError('Illegal size value.')
self.capture_size = size
        self.write_reg(self.REG_CAPTURE_LEN, int.to_bytes(size, length=3, byteorder='little'))
def ns_trigger(self, delay_in_ns):
"""Convert a nS number to delay or width cycles for set_trigger()"""
cycles = (float(delay_in_ns) * 1.0E-9) / (1.0 / float(self.usb_trigger_freq))
return round(cycles)
def us_trigger(self, delay_in_us):
"""Convert a uS number to delay or width cycles for set_trigger()"""
cycles = (float(delay_in_us) * 1.0E-6) / (1.0 / float(self.usb_trigger_freq))
return round(cycles)
def ms_trigger(self, delay_in_ms):
"""Convert a mS number to delay or width cycles for set_trigger()"""
cycles = (float(delay_in_ms) * 1.0E-3) / (1.0 / float(self.usb_trigger_freq))
return round(cycles)
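    # Worked conversion example (added comment, not from the original file):
    # us_trigger(1) computes 1e-6 / (1 / 240e6) = 240, i.e. one microsecond
    # corresponds to 240 ticks of the 240 MHz trigger clock.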
def set_trigger(self, num_triggers=1, delays=[0], widths=[1], enable=True):
"""Program the output trigger pulse(s) delay and width. Both are measured in clock cycles of USB-derived
240 MHz clock. Note that this is a different time base than set_capture_delay(), which uses a 60 MHz
clock! Up to 8 pulses may be issued.
The capture delay is automatically set to match the trigger delay; use set_capture_delay to set it to a
different value. Use ns_trigger(), us_trigger(), and ms_trigger() to convert values as needed.
Args:
num_triggers (int): number of trigger pulses, from 1 to 8.
            delays (list of ints): delay for each trigger pulse; each element in range [0, 2^20-1] cycles
                (only the first element can be zero).
            widths (list of ints): width for each trigger pulse; each element in range [1, 2^17-1] cycles.
enable (bool, optional): set to 'False' to disable trigger generation on hardware pins.
Examples:
            (a) To obtain three 2-cycle-wide pulses, each 3 cycles apart, starting immediately after a
pattern match:
set_trigger(num_triggers=3, delays=[0,3,3], widths=[2,2,2])
            (b) To obtain a 1-cycle wide pulse 10 cycles after a pattern match, followed by a 2-cycle wide
pulse 20 cycles later:
set_trigger(num_triggers=2, delays=[10,20], widths=[1,2])
"""
if num_triggers > 8:
raise ValueError('Maximum 8 trigger pulses.')
if len(delays) != num_triggers or len(widths) != num_triggers:
raise ValueError('Number of elements in delays and widths must match num_triggers.')
data = 0
for i in range(num_triggers):
delay = delays[i]
if (delay >= 2**20) or (delay < 0) or (delay < 1 and i > 0):
raise ValueError('Illegal delay value.')
data += delay << i*24
self.write_reg(self.REG_TRIGGER_DELAY, int.to_bytes(data, length=3*num_triggers, byteorder='little'))
data = 0
for i in range(num_triggers):
width = widths[i]
if (width >= 2**17) or (width < 1):
raise ValueError('Illegal width value.')
data += width << i*24
self.write_reg(self.REG_TRIGGER_WIDTH, int.to_bytes(data, length=3*num_triggers, byteorder='little'))
self.write_reg(self.REG_NUM_TRIGGERS, [num_triggers])
        self.set_capture_delay(int(delays[0]/4))
if enable == True:
self.write_reg(self.REG_TRIGGER_ENABLE, [1])
else:
self.write_reg(self.REG_TRIGGER_ENABLE, [0])
def set_capture_delay(self, delay):
"""Program the capture delay, measured in clock cycles of USB-derived 60 MHz clock.
Note that this is a different time base than set_trigger(), which uses a 240 MHz clock!
Args:
delay (int): range in [0, 2^18-1] cycles of 60 MHz clock.
"""
if (delay >= 2**18) or (delay < 0):
raise ValueError('Illegal delay value.')
self.write_reg(self.REG_CAPTURE_DELAY, int.to_bytes(delay, length=3, byteorder='little'))
def set_pattern(self, pattern, mask=None):
"""Set the pattern and its bitmask used for capture and trigger output.
Args:
pattern (list of ints): list of between 1 and 64 bytes
mask (list, optional): list of bytes, must have same size as 'pattern' if
set. Defaults to [0xff]*len(pattern) if not set.
"""
if mask is None:
mask = [0xFF] * len(pattern)
if len(pattern) != len(mask):
raise ValueError('pattern and mask must be of same size.')
elif len(pattern) > self.MAX_PATTERN_LENGTH:
raise ValueError('pattern and mask cannot be more than 64 bytes.')
# extend the mask to full width (cheaper to do here than in HW):
mask = [0]* (self.MAX_PATTERN_LENGTH - len(mask)) + mask
self.write_reg(self.REG_PATTERN, pattern[::-1])
self.write_reg(self.REG_PATTERN_MASK, mask[::-1])
self.write_reg(self.REG_PATTERN_BYTES, [len(pattern)])
self.pattern = pattern
self.mask = mask
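    # Illustrative call (added comment, not from the original file; the
    # pattern bytes are an assumption): to match a 2-byte sequence while
    # ignoring the low nibble of the second byte:
    #   phy.set_pattern(pattern=[0x2d, 0x00], mask=[0xff, 0xf0])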
def arm(self):
"""Arm PhyWhisperer for capture and optionally generating a trigger.
Use set_pattern to program the pattern and bitmask which will initiate
the capture and/or trigger operation.
Use set_trigger to program the trigger parameters.
Use set_capture_size and set_capture_delay to program the capture parameters.
"""
self.write_reg(self.REG_ARM, [1])
def check_fifo_errors(self, underflow=0, overflow=0):
"""Check whether an underflow or overflow occured on the capture FIFO.
Args:
underflow (int, optional): expected status, 0 or 1
overflow (int, optional): expected status, 0 or 1
"""
status = self.read_reg(self.REG_SNIFF_FIFO_STAT, 1)[0]
fifo_underflow = (status & 2) >> 1
fifo_overflow = (status & 16) >> 4
assert fifo_underflow == underflow
assert fifo_overflow == overflow
def fifo_empty(self):
"""Returns True if the capture FIFO is empty, False otherwise.
"""
if self.read_reg(self.REG_SNIFF_FIFO_STAT, 1)[0] & 1:
return True
else:
return False
def fifo_over_empty_threshold(self):
"""Returns True if the capture FIFO has more entries than the empty threshold (128).
"""
fifo_stat = self.read_reg(self.REG_SNIFF_FIFO_STAT, 1)[0]
fifo_empty = fifo_stat & 1
fifo_empty_threshold = fifo_stat & 4
if fifo_empty or fifo_empty_threshold:
return False
else:
return True
def armed(self):
"""Returns True if the PhyWhisperer is armed.
"""
if self.read_reg(self.REG_ARM, 1)[0]:
return True
else:
return False
def wait_disarmed(self):
"""Blocks until armed() returns false.
"""
while self.armed():
pass
def get_fpga_buildtime(self):
"""Returns date and time when FPGA bitfile was generated.
"""
raw = self.read_reg(self.REG_BUILDTIME, 4)
# definitions: Xilinx XAPP1232
day = raw[3] >> 3
month = ((raw[3] & 0x7) << 1) + (raw[2] >> 7)
year = ((raw[2] >> 1) & 0x3f) + 2000
hour = ((raw[2] & 0x1) << 4) + (raw[1] >> 4)
minute = ((raw[1] & 0xf) << 2) + (raw[0] >> 6)
return "FPGA build time: {}/{}/{}, {}:{}".format(month, day, year, hour, minute)
def trigger_clock_phase_shift(self, steps=1):
"""Shifts the trigger clock phase (and by extension the output trigger) in steps
of 18.6ps (18.6 ps = 1/960 MHz / 56)
Args:
steps (int): Number of steps to shift the phase (positive or negative integer).
"""
if not type(steps) == int or steps == 0:
raise ValueError('Illegal steps value, must be non-zero integer.')
if steps > 0:
value = [1]
else:
value = [0]
for i in range(abs(steps)):
self.write_reg(self.REG_TRIG_CLK_PHASE_SHIFT, value)
while (self.read_reg(self.REG_TRIG_CLK_PHASE_SHIFT, 1)[0] == 1):
# phase shift incomplete; wait:
pass
def set_stat_pattern(self, pattern, mask=0x1f):
""" Set a 5-bit pattern and mask for the USB status lines.
Args:
pattern (int): 5-bit number
mask (int): non-zero 5-bit number (default: 0x1f)
"""
if pattern < 0 or pattern > 0x1f:
raise ValueError('Illegal pattern value, must be <= 0x1f.')
if mask < 1 or mask > 0x1f:
raise ValueError('Illegal mask value, must be <= 0x1f and > 0.')
self.write_reg(self.REG_STAT_PATTERN, [pattern, mask])
def stat_pattern_matched(self):
""" Returns 1 if a stat pattern match occurred (automatically resets to 0 when armed,
and when a new match pattern is written).
Actual match value is stored in self.stat_pattern_match_value.
"""
matched, value = self.read_reg(self.REG_STAT_MATCH, 2)
self.stat_pattern_match_value = value
return matched
def register_sink(self, event_sink):
""" ViewSB: Registers a USBEventSink to receive any USB events.
Args:
event_sink (sniffer.USBEventSink): The sniffer.USBEventSink object to receive any USB events that occur.
"""
self.sniffer.register_sink(event_sink)
def _device_stop_capture(self):
# nothing to do?
pass
    def run_capture(self, size=8188, burst=True, pattern=[0], mask=[0], timeout=5, statistics_callback=None, statistics_period=0.1, halt_callback=lambda _: False):
""" Runs a capture for ViewSB, including power cycling the device to catch the descriptors.
        Runs the following internally::
self.reset_fpga()
self.set_power_source("host")
self.set_power_source("off")
time.sleep(0.5)
self.set_usb_mode("auto")
self.set_capture_size(size)
self.arm()
self.set_trigger(enable=False)
self.set_pattern(pattern=pattern, mask=mask)
self.set_power_source("host")
time.sleep(0.25)
"""
self.reset_fpga()
self.set_power_source("host")
self.set_power_source("off")
time.sleep(0.5)
self.set_usb_mode("auto")
self.set_capture_size(size)
self.arm()
self.set_trigger(enable=False)
self.set_pattern(pattern=pattern, mask=mask)
self.set_power_source("host")
time.sleep(0.25)
self.entries_captured = 0
self._start_comms_thread(burst, timeout)
elapsed_time = 0.0
try:
# Continue until the user-supplied halt condition is met.
while not halt_callback(elapsed_time):
# If we have a statistics callback, call it.
if callable(statistics_callback):
statistics_callback(self, elapsed_time)
# Wait for the next statistics-interval to occur.
time.sleep(statistics_period)
elapsed_time = elapsed_time + statistics_period
finally:
self._device_stop_capture()
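    # Hypothetical ViewSB-style invocation (callback bodies are placeholders):
    #   def stats(phy, elapsed):
    #       print("%.1fs: %d entries" % (elapsed, phy.entries_captured))
    #   phy.run_capture(size=8188, burst=False, timeout=10,
    #                   statistics_callback=stats,
    #                   halt_callback=lambda t: t > 30)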
def __comms_thread_body(self, burst, timeout=5, burst_size=8192):
""" ViewSB internal function that executes as our comms thread.
Args:
burst (bool): If True, read all FIFO at once, then pass on to decoder and frontend;
otherwise, read smaller chunks and process them concurrently.
burst_size (int): Number of entries to read at a time when burst=False
"""
if burst:
self.wait_disarmed()
rawdata = self.read_capture_data()
self.handle_incoming_bytes(rawdata)
else:
notdone = True
early_exit = False
starttime = time.time()
while notdone:
raw = self.read_reg(self.REG_SNIFF_FIFO_RD, 4*burst_size)
bitmask = 2**self.FE_FIFO_STAT_CAPTURE_DONE + 2**self.FE_FIFO_STAT_EMPTY
if raw[-3] & bitmask == bitmask:
notdone = False
if raw[-3] & 2**self.FE_FIFO_STAT_OVERFLOW_BLOCKED:
logging.warning("FIFO overflowed, capture stopped")
early_exit = True
elif timeout and time.time() - starttime > timeout:
logging.warning("Capture timed out!")
early_exit = True
notdone = False
# filter out the empty FIFO reads:
rawdata = []
            for i in range(len(raw) // 4):
if raw[i*4+3] & 3 != self.FE_FIFO_CMD_STRM:
rawdata.append(raw[i*4+1:i*4+4])
self.handle_incoming_bytes(rawdata)
self.entries_captured += len(rawdata)
if early_exit:
logging.warning("%d entries captured." % self.entries_captured)
def _start_comms_thread(self, burst, timeout):
""" ViewSB: start the background thread that handles our core communication. """
self.commthread = threading.Thread(target=self.__comms_thread_body, args=[burst, timeout], daemon=True)
self.__comm_term = False
self.__comm_exc = None
self.commthread.start()
def close(self):
""" Terminates our connection to the PhyWhisperer device. """
if self.viewsb:
self.__comm_term = True
self.commthread.join()
self.usb.close()
|
newaetech/phywhispererusb
|
software/phywhisperer/usb.py
|
usb.py
|
py
| 34,049 |
python
|
en
|
code
| 77 |
github-code
|
6
|
[
{
"api_name": "phywhisperer.protocol.PWPacketDispatcher",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "phywhisperer.sniffer.USBSniffer",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "phywhisperer.interface.naeusb.NAEUSB",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "phywhisperer.interface.naeusb",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "phywhisperer.interface.program_fpga.PhyWhispererUSB",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "phywhisperer.interface.program_fpga",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "phywhisperer.firmware.phywhisperer.getsome",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "serial.tools.list_ports.tools.list_ports.comports",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports.tools",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "serial.tools.list_ports",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports.tools.list_ports.comports",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports.tools",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "serial.tools.list_ports",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "phywhisperer.firmware.phywhisperer.getsome",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "phywhisperer.interface.bootloader_sam3u.Samba",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "phywhisperer.protocol.PWPacketHandler",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "phywhisperer.protocol.IncompletePacket",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "phywhisperer.sniffer.USBSimplePrintSink",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 763,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 779,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 803,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 810,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 812,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 813,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 830,
"usage_type": "call"
}
] |
72132172028
|
import datetime
import re
import subprocess
import sys
from typing import Optional
def run(argv: list[str]) -> subprocess.CompletedProcess:
return subprocess.run(
argv,
capture_output=True,
encoding='utf-8'
)
def error(message: str) -> None:
    sys.stderr.write("%s\n" % message)
def get_merge_commits(base: Optional[str], since: Optional[str]) -> list[str]:
argv = [
"git",
"log",
"--pretty=tformat:%h,%p"
]
if base:
argv.append("%s..HEAD" % base)
if since:
argv.append('--since=%s' % since)
completed = run(argv)
    re_merge_commit = r'^([0-9a-fA-F]+),([0-9a-fA-F]+) ([0-9a-fA-F]+)$'
output = completed.stdout
lines = output.splitlines()
merge_commits = []
for line in lines:
match = re.match(re_merge_commit, line)
if match:
merge_commits.append(match.group(1))
return merge_commits
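# Sketch of what the parsing above consumes (hashes are made up):
# `git log --pretty=tformat:%h,%p` emits one line per commit, e.g.
#   a1b2c3d,e4f5a6b           <- one parent: ordinary commit, no match
#   f00dbab,e4f5a6b 9cafe42   <- two parents: merge commit; group(1)
#                                ("f00dbab") is collected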
def find_matches(merge_commits: list[str], patterns: list[str]) -> list[str]:
matches = []
for commit_hash in merge_commits:
completed = run(
[
"git",
"show",
"--pretty=tformat:%s",
commit_hash
]
)
first_line = completed.stdout.splitlines()[0]
        found_match = False
        for pattern in patterns:
            match = re.search(pattern, first_line)
            if match:
                matches.append(match.group(1))
                found_match = True
                break  # stop at the first matching pattern to avoid duplicate entries
if not found_match:
error("no match: »%s«" % first_line)
return matches
def partition_args(raw: list[str]) -> tuple[list[str], dict[str, Optional[str]]]:
args = []
flags = {}
for arg in raw:
if len(arg) > 0 and arg[0] == '-':
key, *value = arg.split('=', 1)
flags[key] = value[0] if len(value) else None
else:
args.append(arg)
return (args, flags)
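# Illustrative behaviour (inputs assumed, mirroring the logic above):
#   partition_args(["--today", "foo", "--since=3 days"])
#   -> (["foo"], {"--today": None, "--since": "3 days"})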
def parse_relative_date(raw: str) -> Optional[datetime.datetime]:
match = re.match(r'([0-9]+)[ ]*([a-z]+)', raw)
if not match:
return None
n = int(match.group(1))
if match.group(2) == 'days':
return datetime.datetime.now() - datetime.timedelta(days=n)
else:
return None
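# Examples (inputs assumed; only the "days" unit is recognised above):
#   parse_relative_date("3 days")  -> datetime.datetime 3 days before now
#   parse_relative_date("2 weeks") -> None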
def main() -> None:
(args, flags) = partition_args(sys.argv[1:])
commit_hash = None
patterns = []
since = None
if '--today' in flags:
today = datetime.date.today()
since = '%s 00:00:00' % today.isoformat()
patterns = args
elif '--since' in flags:
raw_since = flags['--since']
if not raw_since:
            raise Exception("--since requires a value, e.g. --since='3 days'")
dt = parse_relative_date(raw_since)
if not dt:
            raise Exception("unsupported --since value: %r" % raw_since)
since = dt.isoformat()
patterns = args
elif len(args):
commit_hash, *patterns = args
merge_commits = get_merge_commits(commit_hash, since)
print("Number of merge commits: %d" % len(merge_commits))
print()
print("Merge commits:")
if not merge_commits:
print("(none)")
for commit in merge_commits:
print("- %s" % commit)
print()
if len(patterns) > 0:
print("Matches:")
matches = find_matches(merge_commits, patterns)
if not matches:
print("(none)")
for title in matches:
print("- %s" % title)
if __name__ == "__main__":
main()
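# Hypothetical invocations (script name and patterns assumed):
#   python merge_commits.py v1.2.0 'Merge pull request #([0-9]+)'
#   python merge_commits.py --today 'Merge pull request #([0-9]+)'
#   python merge_commits.py --since='7 days' '#([0-9]+)'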
|
djfo/dev-tools
|
merge_commits.py
|
merge_commits.py
|
py
| 3,438 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "subprocess.run",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.CompletedProcess",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 96,
"usage_type": "attribute"
}
] |